# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
class BracketedSequence:
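    # A minimal sketch of a constructor consistent with the comment
    # above; the signature and field names are assumptions, not taken
    # from this excerpt.
    def __init__(self, x, first=None, nb=None, init_cache=None):
        self.x = x  # underlying (N, T, ...) tensor
        self.first = 0 if first is None else first  # first time step of the bracket
        self.nb = x.size(1) if nb is None else nb  # number of time steps to process
        self.init_cache = True if init_cache is None else init_cache  # reset caches?

######################################################################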
self.attention_dropout = attention_dropout
warnings.warn("flash back", RuntimeWarning)
- self.proba_flashback = 0.1
+ self.proba_flashback = 1e-2
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
        self.b_G = nn.Parameter(
            torch.full((nb_heads, caterpillar_height), -math.log(caterpillar_height - 1))
        )
self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
if self.training and self.proba_flashback > 0.0:
- # insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
-
            # This piece of code makes the assumption that there is
            # nothing informative before t0, otherwise we'd have to
            # implement a cache for V and K too. This should not be
            # too much of a problem since this path is used only during
            # training, where the full sequence is available.
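+            # Pick, for every position of the bracket, a random
+            # source head, and an earlier time step (src_time, derived
+            # from a random offset computed earlier) to flash the
+            # recurrent state back from.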
src_time = t - u - t0
src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
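+            # The same Bernoulli mask is now shared by the V and K
+            # updates below, so values and keys are flashed back with
+            # an identical element-wise pattern; note that this
+            # requires DV == DK for the shapes to match.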
- mask_V = (
+ mask = (
torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
).long()
+
self.rec_V[:, :, t0:t1] = (
- mask_V * V[n, src_head, src_time, dv]
- + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+ mask * V[n, src_head, src_time, dv]
+ + (1 - mask) * self.rec_V[:, :, t0:t1]
)
- mask_K = (
- torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback
- ).long()
self.rec_K[:, :, t0:t1] = (
- mask_K * K[n, src_head, src_time, dk]
- + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+ mask * K[n, src_head, src_time, dk]
+ + (1 - mask) * self.rec_K[:, :, t0:t1]
)
######################################################################
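
# A self-contained sketch of the flashback update above: a Bernoulli
# mask blends randomly gathered values into the recurrent cache,
# mask * new + (1 - mask) * old. All shapes and names below are
# illustrative, not the module's actual configuration.

if __name__ == "__main__":
    import torch

    N, CH, T, DV, H = 2, 3, 5, 4, 3  # batch, cache lines, time, value dim, heads
    proba_flashback = 0.5

    rec_V = torch.zeros(N, CH, T, DV)  # recurrent cache to update
    V = torch.randn(N, H, T, DV)  # freshly computed values

    # Index tensors broadcasting to (N, CH, T, DV) for advanced indexing.
    n = torch.arange(N)[:, None, None, None]
    t = torch.arange(T)[None, None, :, None]
    dv = torch.arange(DV)[None, None, None, :]
    src_head = torch.randint(H, (N, CH, T, 1))  # a random source head per position

    # Where mask == 1, overwrite the cache with the gathered value;
    # elsewhere keep the previous cache entry.
    mask = (torch.rand(N, CH, T, DV) <= proba_flashback).long()
    rec_V = mask * V[n, src_head, t, dv] + (1 - mask) * rec_V
    print(rec_V.size())  # torch.Size([2, 3, 5, 4])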