nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
nb_lines,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
##############################
+class Calibrator:
+ # Accumulates running first and second moments of the tensors passed
+ # to update(), and can recenter a bias and rescale a weight in place
+ # from the measured statistics.
+ def __init__(self, w=None, b=None):
+ self.w = w
+ self.b = b
+ # running sums over the current measurement window
+ self.s, self.s_sq, self.n = 0, 0, 0
+ # moments recorded at the previous call to normalize()
+ self.mean, self.std = 0, 0
+
+ def update(self, X):
+ X = X.detach()
+ self.s += X.sum(dim=0)
+ self.s_sq += X.pow(2).sum(dim=0)
+ self.n += X.size(0)
+
+ def moments(self):
+ mean = self.s / self.n
+ # the clamp guards against a slightly negative variance caused by
+ # floating-point error
+ std = (self.s_sq / self.n - mean * mean).clamp(min=0).sqrt()
+ return mean, std
+
+ def normalize(self):
+ mean, std = self.moments()
+ if self.b is not None:
+ self.b.sub_(mean)
+ if self.w is not None:
+ self.w.div_(std)
+ # report how much the moments drifted since the previous call, then
+ # reset the measurement window
+ result = (mean - self.mean, std - self.std)
+ self.mean, self.std = mean, std
+ self.s, self.s_sq, self.n = 0, 0, 0
+ return result
+
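+# Minimal usage sketch (illustrative only; `layer` and `batches` are
+# assumed names, not part of this module): accumulate statistics over
+# some activations, then recenter the bias and rescale the weight
+# according to the measured moments.
+#
+#   c = Calibrator(w=layer.weight.data, b=layer.bias.data)
+#   for X in batches:
+#       c.update(layer(X))
+#   d_mean, d_std = c.normalize()  # drift since the previous call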
+
class Caterpillar(nn.Module):
def __init__(
self,
caterpillar_height,
attention_dropout=0.0,
len_max=1e5,
+ logger=print,
+ **kwargs,
):
super().__init__()
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- self.proba_gate_dropout = 0.0
+ ######################################################################
+ # sup_args
+
+ x = kwargs.get("gate_dropout")
+ if x is None:
+ self.proba_gate_dropout = 0.0
+ else:
+ self.proba_gate_dropout = float(x)
+
+ logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
+
+ x = kwargs.get("default_bg")
+ if x is None:
+ default_bg = -math.log(caterpillar_height - 1)
+ else:
+ default_bg = float(x)
+
+ logger(f"default_bg {default_bg}")
+
+ ######################################################################
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
- self.b_G = nn.Parameter(
- torch.full(
- (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
- )
- )
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
self.w_K = randw(nb_heads, dim_qk, dim_model)
self.w_V = randw(nb_heads, dim_v, dim_model)
dim_v,
)
+ self.calibrator_G = Calibrator()
+ self.calibrator_rec_V = Calibrator()
+ self.calibrator_rec_K = Calibrator()
+
def reset_inner_loss(self):
self.acc_attention = 0
self.acc_nb = 0
self.rec_K = X.new_zeros(N, R, T, DK)
# We start the recurrent sequences with optimizable
# initial values. No idea if it helps.
- self.rec_V[:, :, t0 - L : t0] = self.init_V_rec[None, :, :, :]
- self.rec_K[:, :, t0 - L : t0] = self.init_K_rec[None, :, :, :]
+ self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+ self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
self.cache_Y = X.new_zeros(N, T, DM)
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
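+ # i.e. G[n, h, r, t] = sigmoid(w_G[h, r] . X[n, t] + b_G[h, r])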
- ######################################################################
- # Roll the gating indexes
+ self.calibrator_G.update(G.reshape(-1, G.size(-1)))
- warnings.warn("rotating barrel", RuntimeWarning)
+ # warnings.warn("softmax gating", RuntimeWarning)
- r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
- t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
- r_barrel = (r_barrel + (t_barrel + t0) // L) % R
- G = G.gather(dim=2, index=r_barrel.expand_as(G))
+ # G = (
+ # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+ # ).softmax(dim=2)
######################################################################
# The "flashbacks"
# G is NxHxRxT, where r is the caterpillar's row.
warnings.warn("gate dropout", RuntimeWarning)
- epsilon = 0.5
- dropout_head = (
- (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
- .expand_as(G)
- .float()
- )
+ kill = (
+ torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
+ ).float()
- dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+ alpha = G / (1 - self.proba_gate_dropout)
- dropout_active = (
- torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
- ).long()
+ G = alpha * (1 - kill)
- dropout_head *= dropout_active
- dropout_tail *= dropout_active
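+
+ # This is inverted dropout on the gates: each gate is zeroed with
+ # probability p = proba_gate_dropout and the survivors are scaled by
+ # 1 / (1 - p), which leaves the expectation of G unchanged.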
+ ######################################################################
+ # Clip the gating to avoid values greater than 1 when several
+ # heads hit the same row
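+ # (e.g. if two heads both put 0.7 on the same row, their sum 1.4 is
+ # clamped to 1 and each gate is rescaled to 0.5)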
- G = (
- G
- + dropout_head * (1 - epsilon - G.detach())
- - dropout_tail * G.detach()
- )
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
######################################################################
+ # Roll the gating indexes
- # We prepare the arguments for the parallel scan
+ # warnings.warn("rotating barrel", RuntimeWarning)
- # Clip the gating to avoid values greater than 1 when several
- # heads hit the same row
+ # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+ # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+ # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+ # G = G.gather(dim=2, index=r_barrel.expand_as(G))
- G = G / G.sum(1, keepdim=True).clamp(min=1)
+ # We prepare the arguments for the parallel scan
A = 1 - G.sum(1)
next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
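+
+ # pscan_dim presumably computes the affine recurrence
+ # rec[t] = A[t] * rec[t-1] + gated[t] in parallel along the time chunks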
- self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
- self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+ next_V = next_V.flatten(2, 3)
+ next_K = next_K.flatten(2, 3)
+
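+ # Fold every dimension except the time axis into the rows, so the
+ # calibrators accumulate per-time-step moments, as for calibrator_G
+ # above.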
+ self.calibrator_rec_V.update(
+ next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
+ )
+ self.calibrator_rec_K.update(
+ next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
+ )
+
+ self.rec_V[:, :, t0:t1] = next_V
+ self.rec_K[:, :, t0:t1] = next_K
######################################################################
# compute the readout
nb_heads=1,
causal=False,
attention_dropout=0.0,
+ logger=print,
+ **kwargs,
):
super().__init__()
dropout=0.0,
len_max=1e5,
attention_layer="kvrec",
+ logger=print,
+ **kwargs,
):
super().__init__()
nb_heads=nb_heads,
causal=causal,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "dumbrec":
return DumbRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "kvrec":
return KVRec(
nb_heads=nb_heads,
nb_lines=nb_lines,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
elif attention_layer == "caterpillar":
return Caterpillar(
caterpillar_length=self.caterpillar_length,
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
+ logger=logger,
+ **kwargs,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")