# with a caching mechanism for keys and values to avoid an O(N^3) cost
# for auto-regression.
+# This implementation is equipped with RNN layers to replace the MHA
+
import math, warnings
import torch, einops
from torch import nn
from torch.nn import functional as F
-from functorch.dim import dims
import ffutils
+# from blanket import blanket
+
# import memload
######################################################################
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
class BracketedSequence:
import pscan
-
# X is /.../xTxD A is /.../xT Y_init is /.../xD
return Y
+def pscan_rgrad(grad_Y, A, X, Y_init, dim=-2, eps=1e-2):
+ with torch.no_grad():
+ s_A, s_X = 0, 0
+ for t in range(X.size(dim) - 1, 0, -1):
+ delta = (grad_Y[t] - s_A) / A[t].grad
+ s_A += A[t].grad * delta
+ A[t].grad = delta
+ delta = (grad_Y[t] - s_X) / X[t].grad
+ s_X += X[t].grad * delta
+ X[t].grad = delta
+
+
def pscan_shape(A, X, Y_init):
s = X.size()
A = A.reshape(-1, s[-2])
class DumbRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- # self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ # self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
class KVRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
def forward(self, bs):
x_q, t0, t1 = bs.x, bs.first, bs.first + bs.nb
- # n,h,l,t,d = dims(5)
-
if bs.init_cache:
self.rec_v = x_q.new_zeros(
x_q.size(0), self.nb_lines, x_q.size(1), self.w_v.size(1)
##############################
+# Returns a tensor with an additional index at rank win_dim, which
+# moves along the same dimension as dim over the domain
+# {0...win_size-1}; dim itself is restricted to a domain reduced by
+# win_size-1 values.
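+#
+# For instance (shapes only, illustrative): for x of shape (2, 7, 3),
+# moving_window(x, dim=1, win_dim=2, win_size=3) is a view of shape
+# (2, 5, 3, 3), with result[n, t, k] == x[n, t + k].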
+
+
def moving_window(x, dim, win_dim, win_size):
size, stride = x.size(), x.stride()
size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
class Caterpillar(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
caterpillar_height,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
warnings.warn("Caterpillar", RuntimeWarning)
- def randw(*d):
- return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
+ def randw(*d, factor=1):
+ return nn.Parameter(torch.randn(*d) * factor / math.sqrt(d[-1]))
self.caterpillar_length = caterpillar_length
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- self.w_G = randw(nb_heads, caterpillar_height, dim_in)
- self.b_G = nn.Parameter(
- torch.full(
- (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
- )
- )
+ ######################################################################
- self.w_K = randw(nb_heads, dim_qk, dim_in)
- self.w_V = randw(nb_heads, dim_v, dim_in)
- self.w_Q = randw(nb_heads, dim_qk, dim_in)
- self.w_O = randw(dim_v * nb_heads, dim_in)
+ self.w_G = randw(nb_heads, caterpillar_height, dim_model)
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), 0.0))
- self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
- self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
+ self.w_K = randw(nb_heads, dim_qk, dim_model)
+ self.w_V = randw(nb_heads, dim_v, dim_model)
+ self.w_Q = randw(nb_heads, dim_qk, dim_model)
+ self.w_O = randw(dim_v * nb_heads, dim_model)
- def reset_inner_loss(self):
- self.acc_attention = 0
- self.acc_nb = 0
+ self.init_K_rec = randw(
+ caterpillar_height,
+ caterpillar_length,
+ dim_qk,
+ )
+ self.init_V_rec = randw(
+ caterpillar_height,
+ caterpillar_length,
+ dim_v,
+ )
- def get_inner_loss(self):
- # warnings.warn("l2 regularization", RuntimeWarning)
- # return (self.acc_attention / self.acc_nb).pow(2).sum()
- return torch.tensor([0], device=self.w_Q.device)
+ # def reset_inner_loss(self):
+ # self.acc_attention = 0
+ # self.acc_nb = 0
+
+ # def get_inner_loss(self):
+ # warnings.warn("l2 regularization", RuntimeWarning)
+ # return (self.acc_attention / self.acc_nb).pow(2).sum()
+ # return torch.tensor([0], device=self.w_Q.device)
def forward(self, bs):
# Dimensions to make the source a bit clearer, that's needed
N = bs.x.size(0)
T = bs.x.size(1)
+ H = self.w_V.size(0)
DV = self.w_V.size(1)
DK = self.w_K.size(1)
- Dout = self.w_O.size(1)
- CH = self.caterpillar_height
- CL = self.caterpillar_length
+ DM = self.w_O.size(1)
+ R = self.caterpillar_height
+ L = self.caterpillar_length
assert (
- t0 >= CL and (t1 - t0) % CL == 0
+ t0 >= L and (t1 - t0) % L == 0
), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
+ # We cache values to deal efficiently with auto-regression
+
if bs.init_cache:
- self.rec_V = X.new_zeros(N, CH, T, DV)
- self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
- self.rec_K = X.new_zeros(N, CH, T, DK)
- self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
- self.cache_Y = X.new_zeros(N, T, Dout)
+ self.rec_V = X.new_zeros(N, R, T, DV)
+ self.rec_K = X.new_zeros(N, R, T, DK)
+ # We start the recurrent sequences with optimizable
+ # initial values. No idea if it helps.
+ self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+ self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
+
+ self.cache_Y = X.new_zeros(N, T, DM)
+
+ V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
+ K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
######################################################################
# Compute the recurrent state
+ # This is the Gating sequence that modulates the storing of
+ # the new key and value in the R pairs of the current
+ # stack. There are R independent gating values, which means
+ # that the current K/V may be stored in multiple pairs of the
+ # recurrent state, or not at all.
+
G = (
- torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
+ torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
- V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
- K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+    # Rescale the gating so that its sum over heads does not exceed 1
+    # when several heads hit the same row
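+    # (for instance, if two heads both output a gate of 0.8 for the
+    # same row, their sum is 1.6 > 1 and both get rescaled to 0.5; a
+    # sum <= 1 is left unchanged by the clamp)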
+
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
+
+ ######################################################################
- A = 1 - G.sum(1)
- gated_V = torch.einsum("nhet,nhtd->netd", G, V)
- gated_K = torch.einsum("nhet,nhtd->netd", G, K)
+ A = 1 - G.sum(dim=1)
- init_rec_V = self.rec_V[:, :, t0 - CL : t0]
- init_rec_K = self.rec_K[:, :, t0 - CL : t0]
+ gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
+ gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
- A = A.unflatten(2, (-1, CL))
- gated_V = gated_V.unflatten(2, (-1, CL))
- gated_K = gated_K.unflatten(2, (-1, CL))
+ # We start from cached values, which matters in inference
- next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
- next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
+ init_rec_V = self.rec_V[:, :, t0 - L : t0]
+ init_rec_K = self.rec_K[:, :, t0 - L : t0]
- self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
- self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+    # Here there is a trick: since the stack at position t is
+    # computed by updating that at position t-L, the parallel
+    # scan operates with a period of L. To do so we split the
+    # sequence index into two axes, the second of size L, and
+    # run the parallel scan using the first as the sequence index.
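+    #
+    # For example (illustrative), with t1 - t0 = 3L the slice of A for
+    # [t0, t1) has shape (N, R, 3L); unflatten(2, (-1, L)) reshapes it
+    # to (N, R, 3, L) and the scan over dim=2 chains the positions t,
+    # t+L, t+2L of each of the L columns independently.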
+
+ A = A.unflatten(2, (-1, L))
+ gated_V = gated_V.unflatten(2, (-1, L))
+ gated_K = gated_K.unflatten(2, (-1, L))
+
+ next_V = pscan_dim(A, gated_V, init_rec_V, dim=2).flatten(2, 3)
+ next_K = pscan_dim(A, gated_K, init_rec_K, dim=2).flatten(2, 3)
+
+ self.rec_V[:, :, t0:t1] = next_V
+ self.rec_K[:, :, t0:t1] = next_K
######################################################################
# compute the readout
Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
- uv = moving_window(
- self.rec_V[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+ # Q = blanket(Q)
+
+ # We build tensors NxHxTxRxL where N is the sample index, H
+ # the head, T the time, R the row in the caterpillar, and L
+ # the column in the caterpillar
+
+ windowed_V = moving_window(
+ self.rec_V[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
)
- uk = moving_window(
- self.rec_K[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+ windowed_K = moving_window(
+ self.rec_K[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
)
+ # We have an attention score for each of the RxL values
+
ar = torch.einsum(
- "nhtd,nftld->nhtfl",
+ "nhtd,nrtld->nhtrl",
Q,
- uk,
+ windowed_K,
) / math.sqrt(DK)
+ # softmax can operate only on one dimension, hence the
+ # flattening
+
ar = ar.flatten(3).softmax(dim=3).view(ar.size())
ar = F.dropout(ar, self.attention_dropout, self.training)
+ # Compute the output for each head, flatten to concatenate
+
Y = torch.einsum(
"nhtfl,nftld->nthd",
ar,
- uv,
+ windowed_V,
).flatten(2)
self.cache_Y[:, t0:t1] = Y @ self.w_O
class QKVAttention(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads=1,
causal=False,
+ horizon=None,
attention_dropout=0.0,
+ logger=print,
+ args=None,
):
super().__init__()
return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
self.causal = causal
+ self.horizon = horizon
self.attention_dropout = attention_dropout
self.record_attention = False
- self.w_q = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_q = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def forward(self, bs):
x_q = bs.x
torch.arange(x_q.size(1), device=q.device)[None, None, :, None]
< torch.arange(x_q.size(1), device=q.device)[None, None, None, :]
)
+
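+        # When a horizon is specified, also mask the keys that are
+        # horizon or more positions in the past, so that the attention
+        # is restricted to a sliding window of that length.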
+ if self.horizon is not None:
+ self.cache_attzero = torch.logical_or(
+ self.cache_attzero,
+ torch.arange(x_q.size(1), device=q.device)[None, None, :, None]
+ >= torch.arange(x_q.size(1), device=q.device)[
+ None, None, None, :
+ ]
+ + self.horizon,
+ )
+
a = a.masked_fill(
self.cache_attzero[
:, :, bs.first : bs.first + bs.nb, : bs.first + bs.nb
nb_blocks,
nb_lines=None,
caterpillar_height=None,
- dim_rec_v=-1,
causal=False,
dropout=0.0,
len_max=1e5,
- attention_layer="kvrec",
+ attention_layer="caterpillar",
+ logger=print,
+ args=None,
):
super().__init__()
- assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+ assert attention_layer in {
+ "mha",
+ "dumbrec",
+ "kvrec",
+ "caterpillar",
+ "attcat",
+ }, f"Unknown attention operator {attention_layer}."
- if attention_layer == "caterpillar":
+ if attention_layer == "caterpillar" or attention_layer == "attcat":
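+            # The recurrent state is made of caterpillar_height rows of
+            # caterpillar_length columns, i.e. nb_lines K/V entries in
+            # total.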
assert nb_lines % caterpillar_height == 0
self.caterpillar_length = nb_lines // caterpillar_height
self.caterpillar_height = caterpillar_height
def attlayer():
if attention_layer == "mha":
- return QKVAttention(
- dim_in=dim_model,
- dim_qk=dim_keys,
- dim_v=dim_model // nb_heads,
- nb_heads=nb_heads,
- causal=causal,
- attention_dropout=dropout,
+ return WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ QKVAttention(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ causal=causal,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
)
elif attention_layer == "dumbrec":
- return DumbRec(
- dim_in=dim_model,
- dim_qk=dim_keys,
- dim_v=dim_rec_v,
- nb_heads=nb_heads,
- nb_lines=nb_lines,
- attention_dropout=dropout,
+ return WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ DumbRec(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ nb_lines=nb_lines,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
)
elif attention_layer == "kvrec":
- return KVRec(
- dim_in=dim_model,
- dim_qk=dim_keys,
- dim_v=dim_rec_v,
- nb_heads=nb_heads,
- nb_lines=nb_lines,
- attention_dropout=dropout,
+ return WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ KVRec(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ nb_lines=nb_lines,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
)
elif attention_layer == "caterpillar":
- return Caterpillar(
- dim_in=dim_model,
- dim_qk=dim_keys,
- dim_v=dim_rec_v,
- nb_heads=nb_heads,
- caterpillar_length=self.caterpillar_length,
- caterpillar_height=self.caterpillar_height,
- attention_dropout=dropout,
+ return WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ Caterpillar(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ caterpillar_length=self.caterpillar_length,
+ caterpillar_height=self.caterpillar_height,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
+ )
+ elif attention_layer == "attcat":
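+            # "attcat" chains an attention layer restricted to a horizon
+            # of caterpillar_length with a Caterpillar layer, each with
+            # its own layer norm and residual connection.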
+ return nn.Sequential(
+ WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ QKVAttention(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ causal=causal,
+ horizon=self.caterpillar_length,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
+ ),
+ WithResidual(
+ CacheWrapper(nn.LayerNorm((dim_model,))),
+ Caterpillar(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ caterpillar_length=self.caterpillar_length,
+ caterpillar_height=self.caterpillar_height,
+ attention_dropout=dropout,
+ logger=logger,
+ args=args,
+ ),
+ ),
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")
for b in range(nb_blocks):
trunk_blocks += [
- WithResidual(
- CacheWrapper(nn.LayerNorm((dim_model,))),
- attlayer(),
- ),
+ attlayer(),
WithResidual(
CacheWrapper(
nn.LayerNorm((dim_model,)),
######################################################################
if __name__ == "__main__":
- print("Basic check.")
+ import argparse
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import matplotlib.collections as mc
+
+ args = argparse.Namespace(
+ gate_dropout_proba=0.0, gate_dropout_sync=True, gate_dropout_replace=False
+ )
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ dim_model, dim_keys, nb_heads = 512, 64, 1
+ dropout = 0.1
+
+ caterpillar = Caterpillar(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ caterpillar_length=16,
+ caterpillar_height=32,
+ attention_dropout=dropout,
+ args=args,
+ ).to(device)
+
+ qkv = QKVAttention(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ causal=True,
+ attention_dropout=dropout,
+ args=args,
+ ).to(device)
+
+ linear = CacheWrapper(nn.Linear(512, 512)).to(device)
+
+ x = torch.randn(1, 256, dim_model)
+
+ x = x.to(device)
+ x.requires_grad_()
+
+ ######################################################################
+
+ fig = plt.figure()
+ fig.set_figheight(6)
+ fig.set_figwidth(8)
+
+ ax = fig.add_subplot(1, 1, 1)
+
+ # ax.set_xlim(-1.5, 1.5)
+ # ax.set_ylim(-1.5, 1.5)
+ # ax.set(aspect=1)
+ # ax.spines.right.set_visible(False)
+ # ax.spines.top.set_visible(False)
+
+ # dt = 0.01
+ # t = np.arange(dt, 20.0, dt)
+ # ax.semilogx(t, np.exp(-t / 5.0))
+ # ax.grid()
+ ax.set_yscale("log")
+
+ ######################################################################
+
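+    # Compare gradient propagation through the three modules: for each
+    # parameter (and the input), sum the gradients of a few random
+    # output components at every time step and plot the sorted values
+    # on a log scale.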
+ for label, model, thickness in [
+ ("nn.Linear", linear, 0.2),
+ ("mygpy.QKVAttention", qkv, 1),
+ ("mygpt.Caterpillar", caterpillar, 2),
+ ]:
+ y = model(BracketedSequence(x, 32, x.size(1) - 32, init_cache=True)).x
+
+ for n, p in [("input", x)] + list(model.named_parameters()):
+ print(f"Processing {model}.{n}")
+ data = []
+ for t in range(y.size(1)):
+ sg = 0
+ for d in torch.randperm(y.size(2))[:8]:
+ sg += torch.autograd.grad(y[0, t, d], p, retain_graph=True)[0]
+ assert not sg.isinf().any()
+ assert not sg.isnan().any()
+ data.append([t, sg.sum().item()])
+
+ data = torch.tensor(data)
+ # cx, cy = data[:, 0], data[:, 1]
+ cy = data[:, 1].sort().values
+ cx = torch.linspace(0, 1, cy.size(0))
+ ax.plot(
+ cx, cy, label=label + "." + n, linewidth=thickness
+ ) # , color='gray', label='Input')
+
+ # ax.legend(frameon=False, loc="top right")
+
+ # Put a legend to the right of the current axis
+ box = ax.get_position()
+ ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+ ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+
+ filename = "plot.pdf"
+ print(f"saving {filename}")
+ fig.savefig(filename, bbox_inches="tight")
+
+ # if args.window and hasattr(plt.get_current_fig_manager(), 'window'):
+ # plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
+ # plt.show()
+
+ exit(0)
+
+ ######################################################################
m = Caterpillar(
- dim_in=4,
+ dim_model=4,
dim_qk=3,
dim_v=7,
nb_heads=1,
print((y1 - torch.cat([y3a, y3b], dim=1)).abs().max())
exit(0)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
vocabulary_size = 128
x = torch.randint(vocabulary_size, (6, 1024))