attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- ######################################################################
- # sup_args
-
- x = kwargs.get("gate_dropout")
- if x is None:
- self.proba_gate_dropout = 0.0
- else:
- self.proba_gate_dropout = float(x)
-
- logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
-
- x = kwargs.get("default_bg")
- if x is None:
- default_bg = -math.log(caterpillar_height - 1)
- else:
- default_bg = float(x)
-
- logger(f"default_bg {default_bg}")
+ self.gate_dropout_proba = args.gate_dropout_proba
+ self.gate_dropout_sync = args.gate_dropout_sync
######################################################################
+ default_bg = -math.log(caterpillar_height - 1)
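# With this default, the bias alone gives an initial gate value of
# 1 / caterpillar_height: sigmoid(-log(R - 1)) = 1 / (1 + (R - 1)) = 1 / R.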
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
- # warnings.warn("softmax gating", RuntimeWarning)
+ # Clip the gating to avoid values greater than 1 when several
+ # heads hit the same row
- # G = (
- # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
- # ).softmax(dim=2)
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
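# A minimal sketch of the gating and clipping above, assuming the shapes
# X: N x T x C, w_G: H x R x C, b_G: H x R (sizes made up). The einsum
# yields one gate per (head, row, time), and the division caps the total
# amount written by all heads into a given row at 1.
import torch

N, T, C, H, R = 2, 6, 8, 3, 4
X, w_G, b_G = torch.randn(N, T, C), torch.randn(H, R, C), torch.zeros(H, R)
G = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).sigmoid()
G = G / G.sum(1, keepdim=True).clamp(min=1)  # per-row sum over heads <= 1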
######################################################################
- # The "flashbacks"
-
- if self.training and self.proba_gate_dropout > 0.0:
- # This is a better implementation of "flashbacks".
-
- # G is NxHxExT where e is the caterpillar's row.
-
- warnings.warn("gate dropout", RuntimeWarning)
-
- kill = (
- torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
- ).float()
-
- alpha = G / (1 - self.proba_gate_dropout)
-
- G = alpha * (1 - kill)
def recurrence(G, V, K):
- # Clip the gating to avoid values greater than 1 when several
- # heads hit the same row
-
- G = G / G.sum(1, keepdim=True).clamp(min=1)
-
# We prepare the arguments for the parallel scan
A = 1 - G.sum(1)
init_rec_V = self.rec_V[:, :, t0 - L : t0]
init_rec_K = self.rec_K[:, :, t0 - L : t0]
- # Associative scan
-
# Here there is a trick: Since the stack at position t is
# computed by updating that at position t-L, the parallel
# scan operates with a period of L. To do so we split the
next_V, next_K = recurrence(G, V, K)
+ if self.training and self.gate_dropout_proba > 0.0:
+ # G is NxHxRxT where R is the caterpillar's row.
+
+ warnings.warn("gate dropout", RuntimeWarning)
+
+ if self.gate_dropout_sync:
+ shape_kill = (N, 1, 1)
+ else:
+ shape_kill = (N, H, R)
+
+ # Pick a point in each of the NxHxR timelines and set this
+ # entry and the following ones to 1
+ kill = (
+ torch.rand(*shape_kill, t1 - t0, device=G.device).sort(dim=3).indices
+ == 0
+ ).cumsum(dim=3)
+
+ # Keep this mask for only some of the NxHxR timelines
+ kill = kill * (
+ torch.rand(*shape_kill, 1, device=G.device) <= self.gate_dropout_proba
+ )
+
+ # The coefficients to keep are the complement of the kill mask
+ mask = 1 - kill
+
+ masked_next_V, masked_next_K = recurrence(G * mask, V, K)
+
+ next_V = next_V.detach() + (masked_next_V - masked_next_V.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
+ next_K = next_K.detach() + (masked_next_K - masked_next_K.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
+
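# A minimal sketch of the two tricks above, outside the layer and with
# made-up sizes. (1) Step mask: along the time dimension, the rank of the
# entry at index 0 among i.i.d. uniforms is itself uniform, so
# (sort().indices == 0) places a single 1 at a random position and the
# cumsum extends it to the end of the timeline. (2) Straight-through
# rescaling: the forward value is the clean output, while the gradient
# flows through the masked output, scaled by 1 / (1 - p) to compensate
# for the dropped entries.
import torch

p, T = 0.25, 8
u = torch.rand(1, 1, 1, T)
kill = (u.sort(dim=3).indices == 0).cumsum(dim=3)  # e.g. 0 0 0 1 1 1 1 1

x = torch.randn(T, requires_grad=True)             # stands in for next_V
x_masked = x * (1 - kill.flatten()).float()        # stands in for masked_next_V
y = x.detach() + (x_masked - x_masked.detach()) / (1 - p)
assert torch.allclose(y, x)                        # forward pass: clean values
y.sum().backward()                                 # gradients: masked path only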
self.rec_V[:, :, t0:t1] = next_V
self.rec_K[:, :, t0:t1] = next_K
Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
- # We build tensors NxHxTxFxL where N is the sample index, H
- # the head, T the time, F the row in the caterpillar, and L
+ # We build tensors NxHxTxRxL where N is the sample index, H
+ # the head, T the time, R the row in the caterpillar, and L
# the column in the caterpillar
windowed_V = moving_window(
# We have an attention score for each of the RxL values
ar = torch.einsum(
- "nhtd,nftld->nhtfl",
+ "nhtd,nrtld->nhtrl",
Q,
windowed_K,
) / math.sqrt(DK)
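# A minimal sketch of the windowed attention above, under the assumption
# that moving_window (defined elsewhere in the file) exposes, at every
# time step, the L previous entries of the recurrent state; Tensor.unfold
# is used here only as a stand-in for it, and all sizes are made up.
import math
import torch

N, H, R, T, L, D = 2, 3, 4, 10, 5, 16
rec_K = torch.randn(N, R, T + L - 1, D)
windowed_K = rec_K.unfold(2, L, 1).transpose(3, 4)  # N x R x T x L x D
Q = torch.randn(N, H, T, D)
ar = torch.einsum("nhtd,nrtld->nhtrl", Q, windowed_K) / math.sqrt(D)  # N x H x T x R x L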
causal=False,
attention_dropout=0.0,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
len_max=1e5,
attention_layer="kvrec",
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
causal=causal,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "dumbrec":
return DumbRec(
nb_lines=nb_lines,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "kvrec":
return KVRec(
nb_lines=nb_lines,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "caterpillar":
return Caterpillar(
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")
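# A minimal sketch of the args object these constructors now take in place
# of **kwargs: any namespace exposing the fields read above, for instance
# an argparse.Namespace (the concrete values here are made up). It is then
# handed down unchanged, e.g. Caterpillar(..., logger=logger, args=args).
from argparse import Namespace

args = Namespace(gate_dropout_proba=0.05, gate_dropout_sync=False)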