# with a caching mechanism for keys and values to avoid an O(N^3) cost
# for auto-regression.
+# This implementation is equipped with RNN layers to replace the MHA.
+
import math, warnings
import torch, einops
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
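+#
+# (From the usage in Caterpillar.forward below: a BracketedSequence
+# wraps a full tensor bs.x of shape NxTx..., of which only the slice
+# [bs.first, bs.first+bs.nb) is to be processed by the current call.)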
class BracketedSequence:
import pscan
-
# X is /.../xTxD A is /.../xT Y_init is /.../xD
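+#
+# A sketch of the contract, inferred from the shapes above and the
+# calls below: pscan_dim computes Y[t] = A[t] * Y[t-1] + X[t] along
+# dim, starting from Y[-1] = Y_init.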
return Y
+def pscan_rgrad(grad_Y, A, X, Y_init, dim=-2, eps=1e-2):
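+    # Recomputes A.grad and X.grad in place from grad_Y, unrolling the
+    # recurrence backward in time (a reading of the code below; note
+    # that eps is currently unused, presumably meant to guard the
+    # divisions).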
+ with torch.no_grad():
+ s_A, s_X = 0, 0
+ for t in range(X.size(dim) - 1, 0, -1):
+ delta = (grad_Y[t] - s_A) / A[t].grad
+ s_A += A[t].grad * delta
+ A[t].grad = delta
+ delta = (grad_Y[t] - s_X) / X[t].grad
+ s_X += X[t].grad * delta
+ X[t].grad = delta
+
+
def pscan_shape(A, X, Y_init):
s = X.size()
A = A.reshape(-1, s[-2])
class DumbRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- # self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ # self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
class KVRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
##############################
+# Returns a tensor with an additional index at rank win_dim, that
+# moves along the same dimension as dim, over the domain
+# {0...win_size-1}, while dim is restricted to a domain reduced by
+# win_size-1 values.
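+#
+# An illustrative example (not from the original comment): if x is of
+# shape (N, T), then moving_window(x, dim=1, win_dim=2, win_size=W)
+# has shape (N, T-W+1, W), with y[n, t, k] == x[n, t+k].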
+
+
def moving_window(x, dim, win_dim, win_size):
size, stride = x.size(), x.stride()
size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
class Caterpillar(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
caterpillar_height,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
warnings.warn("Caterpillar", RuntimeWarning)
- def randw(*d):
- return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
+ def randw(*d, amplitude=None):
+ if amplitude is None:
+ amplitude = 1 / math.sqrt(d[-1])
+ return nn.Parameter(amplitude * torch.randn(*d))
self.caterpillar_length = caterpillar_length
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- self.w_G = randw(nb_heads, caterpillar_height, dim_in)
- self.b_G = nn.Parameter(
- torch.full(
- (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
- )
- )
+ ######################################################################
+ # sup_args
+
+ x = kwargs.get("gate_dropout")
+ if x is None:
+ self.proba_gate_dropout = 0.0
+ else:
+ self.proba_gate_dropout = float(x)
- self.w_K = randw(nb_heads, dim_qk, dim_in)
- self.w_V = randw(nb_heads, dim_v, dim_in)
- self.w_Q = randw(nb_heads, dim_qk, dim_in)
- self.w_O = randw(dim_v * nb_heads, dim_in)
+ logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
- self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
- self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
+ x = kwargs.get("default_bg")
+ if x is None:
+ default_bg = -math.log(caterpillar_height - 1)
+ else:
+ default_bg = float(x)
+
+ logger(f"default_bg {default_bg}")
+
+ ######################################################################
+
+ self.w_G = randw(nb_heads, caterpillar_height, dim_model)
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
+
+ self.w_K = randw(nb_heads, dim_qk, dim_model)
+ self.w_V = randw(nb_heads, dim_v, dim_model)
+ self.w_Q = randw(nb_heads, dim_qk, dim_model)
+ self.w_O = randw(dim_v * nb_heads, dim_model)
+
+ self.init_K_rec = randw(
+ caterpillar_height,
+ caterpillar_length,
+ dim_qk,
+ )
+ self.init_V_rec = randw(
+ caterpillar_height,
+ caterpillar_length,
+ dim_v,
+ )
def reset_inner_loss(self):
self.acc_attention = 0
N = bs.x.size(0)
T = bs.x.size(1)
+ H = self.w_V.size(0)
DV = self.w_V.size(1)
DK = self.w_K.size(1)
- Dout = self.w_O.size(1)
- CH = self.caterpillar_height
- CL = self.caterpillar_length
+ DM = self.w_O.size(1)
+ R = self.caterpillar_height
+ L = self.caterpillar_length
assert (
- t0 >= CL and (t1 - t0) % CL == 0
+ t0 >= L and (t1 - t0) % L == 0
), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
+ # We cache values to deal efficiently with auto-regression
+
if bs.init_cache:
- self.rec_V = X.new_zeros(N, CH, T, DV)
- self.rec_K = X.new_zeros(N, CH, T, DK)
+ self.rec_V = X.new_zeros(N, R, T, DV)
+ self.rec_K = X.new_zeros(N, R, T, DK)
# We start the recurrent sequences with optimizable
# initial values. No idea if it helps.
- self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
- self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
+ self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+ self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
- self.cache_Y = X.new_zeros(N, T, Dout)
+ self.cache_Y = X.new_zeros(N, T, DM)
+
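+    # One key and one value per head and time step: V is NxHxTxDV,
+    # K is NxHxTxDK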
+ V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
+ K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
######################################################################
# Compute the recurrent state
- # This is the Gating sequence that modulates if they key and
- # values should be stored in one of the CH pairs of the
- # current stack. The CH gating values are independent, which
- # means that the same thing could be stored multiple times or
- # not at all
+    # This is the gating sequence that modulates the storing of
+ # the new key and value in the R pairs of the current
+ # stack. There are R independent gating values, which means
+ # that the current K/V may be stored in multiple pairs of the
+ # recurrent state, or not at all.
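+    #
+    # In formula, the update implemented by recurrence() below is,
+    # with r the row index and G normalized to sum to at most 1 over
+    # the heads:
+    #
+    #   rec_V[n,r,t] = (1 - sum_h G[n,h,r,t]) * rec_V[n,r,t-L]
+    #                + sum_h G[n,h,r,t] * V[n,h,t]
+    #
+    # and similarly for rec_K with K.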
G = (
- torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
+ torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
- V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
- K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+ # warnings.warn("softmax gating", RuntimeWarning)
+
+ # G = (
+ # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+ # ).softmax(dim=2)
+
+ ######################################################################
+ # The "flashbacks"
+
+ if self.training and self.proba_gate_dropout > 0.0:
+ # This is a better implementation of "flashbacks".
+
+            # G is NxHxRxT, where r is the caterpillar's row.
+
+ warnings.warn("gate dropout", RuntimeWarning)
+
+ kill = (
+ torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
+ ).float()
+
+ alpha = G / (1 - self.proba_gate_dropout)
- # We prepare the arguments for the parallel scan
+ G = alpha * (1 - kill)
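+
+            # As in standard inverted dropout, each gate is zeroed
+            # with probability p and the survivors are scaled by
+            # 1/(1-p), so that E[G] is unchanged.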
- A = 1 - G.sum(1)
- gated_V = torch.einsum("nhet,nhtd->netd", G, V)
- gated_K = torch.einsum("nhet,nhtd->netd", G, K)
+ def recurrence(G, V, K):
+ # Clip the gating to avoid values greater than 1 when several
+ # heads hit the same row
- init_rec_V = self.rec_V[:, :, t0 - CL : t0]
- init_rec_K = self.rec_K[:, :, t0 - CL : t0]
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
- # Here there is a trick: The parallel scan operates with a
- # period of L, so we split the sequence indexing in two axes,
- # the second of size CL, and run the parallel scan using the
- # other alone as the sequence index.
+ # We prepare the arguments for the parallel scan
- A = A.unflatten(2, (-1, CL))
- gated_V = gated_V.unflatten(2, (-1, CL))
- gated_K = gated_K.unflatten(2, (-1, CL))
+ A = 1 - G.sum(1)
- next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
- next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
+ gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
+ gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
- # Put back the sequence index
+ # We start from cached values, which matters in inference
- self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
- self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+ init_rec_V = self.rec_V[:, :, t0 - L : t0]
+ init_rec_K = self.rec_K[:, :, t0 - L : t0]
+
+ # Associative scan
+
+ # Here there is a trick: Since the stack at position t is
+ # computed by updating that at position t-L, the parallel
+        # scan operates with a period of L. To do so we split the
+        # sequence indexing into two axes, the second of size L, and
+ # run the parallel scan using the first as the sequence index.
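+        #
+        # E.g. (an illustration, not in the original comment) with
+        # t1-t0 = 8 and L = 4, the steps t0..t0+7 are reshaped into a
+        # 2x4 grid and the scan runs along the first axis, so that
+        # column c chains t0+c -> t0+4+c.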
+
+ A = A.unflatten(2, (-1, L))
+ gated_V = gated_V.unflatten(2, (-1, L))
+ gated_K = gated_K.unflatten(2, (-1, L))
+
+ next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
+ next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
+
+ next_V = next_V.flatten(2, 3)
+ next_K = next_K.flatten(2, 3)
+
+ return next_V, next_K
+
+ #################################################################
+
+ next_V, next_K = recurrence(G, V, K)
+
+ self.rec_V[:, :, t0:t1] = next_V
+ self.rec_K[:, :, t0:t1] = next_K
######################################################################
# compute the readout
# the column in the caterpillar
windowed_V = moving_window(
- self.rec_V[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+ self.rec_V[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
)
windowed_K = moving_window(
- self.rec_K[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+ self.rec_K[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
)
- # We have an attention score for each of the CHxCL value
+ # We have an attention score for each of the RxL values
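+    # (for each head and time step, ar below is indexed by the R rows
+    # and the L window positions of the recurrent state: NxHxTxRxL)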
ar = torch.einsum(
"nhtd,nftld->nhtfl",
class QKVAttention(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads=1,
causal=False,
attention_dropout=0.0,
+ logger=print,
+ **kwargs,
):
super().__init__()
self.attention_dropout = attention_dropout
self.record_attention = False
- self.w_q = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_q = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def forward(self, bs):
x_q = bs.x
nb_blocks,
nb_lines=None,
caterpillar_height=None,
- dim_rec_v=-1,
causal=False,
dropout=0.0,
len_max=1e5,
attention_layer="kvrec",
+ logger=print,
+ **kwargs,
):
super().__init__()
- assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+ assert attention_layer in {
+ "mha",
+ "dumbrec",
+ "kvrec",
+ "caterpillar",
+ }, f"Unknown attention operator {attention_layer}."
if attention_layer == "caterpillar":
assert nb_lines % caterpillar_height == 0
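+            # (presumably caterpillar_length = nb_lines // caterpillar_height,
+            # hence the divisibility requirement)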
def attlayer():
if attention_layer == "mha":
return QKVAttention(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
dim_v=dim_model // nb_heads,
nb_heads=nb_heads,
causal=causal,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "dumbrec":
return DumbRec(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
- dim_v=dim_rec_v,
+ dim_v=dim_model // nb_heads,
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "kvrec":
return KVRec(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
- dim_v=dim_rec_v,
+ dim_v=dim_model // nb_heads,
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "caterpillar":
return Caterpillar(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
- dim_v=dim_rec_v,
+ dim_v=dim_model // nb_heads,
nb_heads=nb_heads,
caterpillar_length=self.caterpillar_length,
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")
print("Basic check.")
m = Caterpillar(
- dim_in=4,
+ dim_model=4,
dim_qk=3,
dim_v=7,
nb_heads=1,