nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
caterpillar_height,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
self.proba_gate_dropout = 0.0
+ default_b_G = kwargs.get("default_b_G")
+ if default_b_G is None:
+ default_b_G = -math.log(caterpillar_height - 1)
+
+ logger(f"default_b_G {default_b_G}")
+
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
- self.b_G = nn.Parameter(
- torch.full(
- (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
- )
- )
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_b_G))
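The default bias reproduces the value that was previously hard-coded: with b_G = -log(R - 1), each gate starts at sigmoid(b_G) = 1/R, i.e. one over the caterpillar height. A minimal sanity check, assuming nothing beyond standard PyTorch (R and b are illustrative names, not identifiers from the patch):

    import math
    import torch

    R = 4                                  # plays the role of caterpillar_height
    b = -math.log(R - 1)                   # the default b_G computed above
    g0 = torch.sigmoid(torch.tensor(b))    # initial gate value per head and row
    print(g0.item())                       # 0.25 == 1 / R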
self.w_K = randw(nb_heads, dim_qk, dim_model)
self.w_V = randw(nb_heads, dim_v, dim_model)
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
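The einsum above contracts the channel dimension: with X of shape (N, T, C) and w_G of shape (H, R, C), the gating G has shape (N, H, R, T), one value per sample, head, caterpillar row and time step. A small shape check, a sketch assuming those dimension names (the tensors are placeholders):

    import torch

    N, T, C, H, R = 2, 5, 16, 3, 4
    X = torch.randn(N, T, C)
    w_G = torch.randn(H, R, C)
    b_G = torch.zeros(H, R)
    G = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).sigmoid()
    print(G.shape)    # torch.Size([2, 3, 4, 5]) == (N, H, R, T)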
- ######################################################################
- # Roll the gating indexes
- warnings.warn("rotating barrel", RuntimeWarning)
- n_barrel = torch.arange(N, device=G.device)[:, None, None, None]
- h_barrel = torch.arange(H, device=G.device)[None, :, None, None]
- r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
- t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
- r_barrel = (r_barrel + t_barrel + t0) % R
- # print(f"({N}, {H}, {R}, {t1-t0}) {G.size()=}")
- G = G[n_barrel, h_barrel, r_barrel, t_barrel]
- # print(G.sum())
+ # Clip the gating to avoid values greater than 1 when several
+ # heads hit the same row
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
+ ######################################################################
+ # Roll the gating indexes
+ # warnings.warn("rotating barrel", RuntimeWarning)
+ # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+ # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+ # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+ # G = G.gather(dim=2, index=r_barrel.expand_as(G))
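The added normalization G = G / G.sum(1, keepdim=True).clamp(min=1) rescales the gates only where the sum over heads exceeds 1 and leaves the other rows untouched, thanks to the clamp. A toy illustration of that behaviour (the values are made up, and the single dimension here stands for the head dimension, which is dim 1 in the patched code):

    import torch

    G = torch.tensor([0.9, 0.8])    # two heads on the same row, sum 1.7 > 1 -> rescaled
    print(G / G.sum(0, keepdim=True).clamp(min=1))    # tensor([0.5294, 0.4706])

    G = torch.tensor([0.3, 0.2])    # sum 0.5 <= 1 -> unchanged
    print(G / G.sum(0, keepdim=True).clamp(min=1))    # tensor([0.3000, 0.2000])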
######################################################################
# The "flashbacks"
# We prepare the arguments for the parallel scan
- # Clip the gating to avoid values greater than 1 when several
- # heads hit the same row
-
- G = G / G.sum(1, keepdim=True).clamp(min=1)
-
A = 1 - G.sum(1)
# warnings.warn("harmonic recurrence", RuntimeWarning)
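A = 1 - G.sum(1) is the complement of the total gating per row and acts as a forgetting coefficient for the scan. The parallel scan itself is not part of this hunk; the sketch below only spells out the first-order recurrence rec[t] = A[t] * rec[t-1] + X[t] that such a scan is assumed to evaluate, with illustrative names and shapes:

    import torch

    def sequential_scan(A, X, init):
        # Reference recurrence: rec[t] = A[t] * rec[t-1] + X[t]
        rec, out = init, []
        for t in range(A.size(-1)):
            rec = A[..., t] * rec + X[..., t]
            out.append(rec)
        return torch.stack(out, dim=-1)

    A = torch.rand(2, 4, 8)     # (N, R, T) forgetting coefficients
    X = torch.randn(2, 4, 8)    # gated contributions to accumulate
    Y = sequential_scan(A, X, torch.zeros(2, 4))
    print(Y.shape)              # torch.Size([2, 4, 8])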
nb_heads=1,
causal=False,
attention_dropout=0.0,
+ logger=print,
+ **kwargs,
):
super().__init__()
dropout=0.0,
len_max=1e5,
attention_layer="kvrec",
+ logger=print,
+ **kwargs,
):
super().__init__()
nb_heads=nb_heads,
causal=causal,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "dumbrec":
return DumbRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "kvrec":
return KVRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "caterpillar":
return Caterpillar(
caterpillar_length=self.caterpillar_length,
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")
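With logger and **kwargs threaded through every branch, layer-specific options can be set at construction time without widening the dispatch above. A hypothetical call, where the numeric values and any parameter not visible in this diff are placeholders:

    layer = Caterpillar(
        dim_model=512,
        dim_qk=64,
        dim_v=64,
        nb_heads=4,
        caterpillar_length=16,
        caterpillar_height=4,
        attention_dropout=0.0,
        logger=print,
        default_b_G=-2.0,    # reaches kwargs.get("default_b_G"), overriding -log(height - 1)
    )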