import pscan
-
# X is /.../xTxD, A is /.../xT, Y_init is /.../xD
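# Assuming the usual associative-scan semantics, this computes, along dim:
#   Y[0] = A[0] * Y_init + X[0],   Y[t] = A[t] * Y[t-1] + X[t]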
return Y
+def pscan_rgrad(grad_Y, A, X, Y_init, dim=-2, eps=1e-2):
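+ # Presumably an in-place reverse pass: it rewrites A.grad and X.grad
+ # from the incoming grad_Y, assuming a prior backward already populated
+ # the .grad fields (note that eps is currently unused)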
+ with torch.no_grad():
+ s_A, s_X = 0, 0
+ for t in range(X.size(dim) - 1, 0, -1):
+ delta = (grad_Y[t] - s_A) / A[t].grad
+ s_A += A[t].grad * delta
+ A[t].grad = delta
+ delta = (grad_Y[t] - s_X) / X[t].grad
+ s_X += X[t].grad * delta
+ X[t].grad = delta
+
+
def pscan_shape(A, X, Y_init):
s = X.size()
A = A.reshape(-1, s[-2])
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
caterpillar_height,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ args=None,
):
super().__init__()
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- self.proba_gate_dropout = 0.0
+ self.gate_dropout_proba = args.gate_dropout_proba
+ self.gate_dropout_sync = args.gate_dropout_sync
+
+ ######################################################################
+ default_bg = -math.log(caterpillar_height - 1)
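+ # with this bias, sigmoid(b_G) = 1 / caterpillar_height at initialization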
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
- self.b_G = nn.Parameter(
- torch.full(
- (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
- )
- )
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
self.w_K = randw(nb_heads, dim_qk, dim_model)
self.w_V = randw(nb_heads, dim_v, dim_model)
self.rec_K = X.new_zeros(N, R, T, DK)
# We start the recurrent sequences with optimizable
# initial values. No idea if it helps.
- self.rec_V[:, :, t0 - L : t0] = self.init_V_rec[None, :, :, :]
- self.rec_K[:, :, t0 - L : t0] = self.init_K_rec[None, :, :, :]
+ self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+ self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
self.cache_Y = X.new_zeros(N, T, DM)
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
- ######################################################################
- # Roll the gating indexes
-
- warnings.warn("rotating barrel", RuntimeWarning)
-
- # print(f"SANITY2 {N=} {H=} {R=} {t0=} {t1=} {G.size()=}")
-
- n_barrel = torch.arange(N, device=G.device)[:, None, None, None]
- h_barrel = torch.arange(H, device=G.device)[None, :, None, None]
- r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
- t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
- r_barrel = (r_barrel + (t_barrel + t0) // L) % R
-
- # GG = G.gather(dim=2,index=r_barrel)
- G = G[n_barrel, h_barrel, r_barrel, t_barrel]
+ # Clip the gating to avoid values greater than 1 when several
+ # heads hit the same row
- # print("SANITY", (GG-G).abs())
- # exit(0)
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
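+ # this keeps the retention coefficient A = 1 - G.sum(1), used in the
+ # recurrence below, in [0, 1]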
######################################################################
- # The "flashbacks"
- if self.training and self.proba_gate_dropout > 0.0:
- # This is a better implementation of "flashbacks".
+ def recurrence(G, V, K):
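+ # Runs the linear recurrence over the gated inputs and returns the
+ # updated caterpillar rows for V and K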
+ # We prepare the arguments for the parallel scan
- # G is NxHxExT where e is the caterpillar's row.
+ A = 1 - G.sum(1)
- warnings.warn("gate dropout", RuntimeWarning)
- epsilon = 0.5
-
- dropout_head = (
- (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
- .expand_as(G)
- .float()
- )
+ gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
+ gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
- dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+ # We start from cached values, which matters in inference
- dropout_active = (
- torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
- ).long()
+ init_rec_V = self.rec_V[:, :, t0 - L : t0]
+ init_rec_K = self.rec_K[:, :, t0 - L : t0]
- dropout_head *= dropout_active
- dropout_tail *= dropout_active
+ # Here there is a trick: since the stack at position t is
+ # computed by updating the one at position t-L, the parallel
+ # scan operates with a period of L. To do so, we split the
+ # sequence index into two axes, the second of size L, and
+ # run the parallel scan using the first as the sequence index.
- G = (
- G
- + dropout_head * (1 - epsilon - G.detach())
- - dropout_tail * G.detach()
- )
+ A = A.unflatten(2, (-1, L))
+ gated_V = gated_V.unflatten(2, (-1, L))
+ gated_K = gated_K.unflatten(2, (-1, L))
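+ # e.g. with t1 - t0 = 2L, A goes from (N, R, 2L) to (N, R, 2, L) and
+ # the scan below runs over the axis of size 2, one step per period of L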
- ######################################################################
+ next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
+ next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
- # We prepare the arguments for the parallel scan
+ next_V = next_V.flatten(2, 3)
+ next_K = next_K.flatten(2, 3)
- # Clip the gating to avoid values greater than 1 when several
- # heads hit the same row
+ return next_V, next_K
- G = G / G.sum(1, keepdim=True).clamp(min=1)
-
- A = 1 - G.sum(1)
+ #################################################################
- # warnings.warn("harmonic recurrence", RuntimeWarning)
- # har = torch.arange(t0, t1, device = G.device).float() + 1
- # A = har / (har + 1)
- # G = G / har
+ next_V, next_K = recurrence(G, V, K)
- gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
- gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
+ if self.training and self.gate_dropout_proba > 0.0:
+ # G is NxHxRxT, where R indexes the caterpillar's rows.
- # We start from cached values, which matters in inference
+ warnings.warn("gate dropout", RuntimeWarning)
- init_rec_V = self.rec_V[:, :, t0 - L : t0]
- init_rec_K = self.rec_K[:, :, t0 - L : t0]
+ # Pick a point in each of the NxHxR timelines and set that
+ # entry and all the following ones to 1
+ kill = (
+ torch.rand(N, H, R, t1 - t0, device=G.device).sort(dim=3).indices == 0
+ ).cumsum(dim=3)
- #################################################################
- # Associative scan
+ # Keep this mask for only some of the NxHxR timelines
+ kill = kill * (
+ torch.rand(N, H, R, 1, device=G.device) <= self.gate_dropout_proba
+ )
- # Here there is a trick: Since the stack at position t is
- # computed by updating that at position t-L, the parallel
- # scan operates with a period of L. To do so we split the
- # sequence indexing in two axes, the second of size L, and
- # run the parallel scan using the first as the sequence index.
+ # The coefficients to keep are the complement of the kill mask
+ mask = 1 - kill
- A = A.unflatten(2, (-1, L))
- gated_V = gated_V.unflatten(2, (-1, L))
- gated_K = gated_K.unflatten(2, (-1, L))
+ masked_next_V, masked_next_K = recurrence(G * mask, V, K)
- next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
- next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
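+ # Straight-through-style estimator: the forward values are the unmasked
+ # next_V / next_K, while gradients flow only through the masked
+ # recurrence, rescaled by 1 / (1 - p) to compensate in expectation for
+ # the killed timelines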
+ next_V = next_V.detach() + (masked_next_V - masked_next_V.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
+ next_K = next_K.detach() + (masked_next_K - masked_next_K.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
- self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
- self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+ self.rec_V[:, :, t0:t1] = next_V
+ self.rec_K[:, :, t0:t1] = next_K
######################################################################
# compute the readout
Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
- # We build tensors NxHxTxFxL where N is the sample index, H
- # the head, T the time, F the row in the caterpillar, and L
+ # We build tensors NxHxTxRxL where N is the sample index, H
+ # the head, T the time, R the row in the caterpillar, and L
# the column in the caterpillar
windowed_V = moving_window(
# We have an attention score for each of the RxL values
ar = torch.einsum(
- "nhtd,nftld->nhtfl",
+ "nhtd,nrtld->nhtrl",
Q,
windowed_K,
) / math.sqrt(DK)
nb_heads=1,
causal=False,
attention_dropout=0.0,
+ logger=print,
+ args=None,
):
super().__init__()
dropout=0.0,
len_max=1e5,
attention_layer="kvrec",
+ logger=print,
+ args=None,
):
super().__init__()
nb_heads=nb_heads,
causal=causal,
attention_dropout=dropout,
+ logger=logger,
+ args=args,
)
elif attention_layer == "dumbrec":
return DumbRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ args=args,
)
elif attention_layer == "kvrec":
return KVRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ args=args,
)
elif attention_layer == "caterpillar":
return Caterpillar(
caterpillar_length=self.caterpillar_length,
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
+ logger=logger,
+ args=args,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")