- if self.training and self.proba_flashback > 0.0:
-     warnings.warn("flash back", RuntimeWarning)
-     # This piece of code makes the assumption that there is
-     # nothing informative before t0, otherwise we'd have to
-     # implement a cache for V and K too. This should not be
-     # too much of a problem since this is used only during
-     # training, where the full sequences are available.
-
-     n = torch.arange(N, device=X.device)[:, None, None, None]
-     t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
-     dv = torch.arange(DV, device=X.device)[None, None, None, :]
-     dk = torch.arange(DK, device=X.device)[None, None, None, :]
-
-     u = (
-         torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
-     ) * CL
-
-     src_time = t - u - t0
-     src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
-
-     mask = (
-         torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
-     ).long()
-
-     self.rec_V[:, :, t0:t1] = (
-         mask * V[n, src_head, src_time, dv]
-         + (1 - mask) * self.rec_V[:, :, t0:t1]
+ next_V, next_K = recurrence(G, V, K)
+
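+ # Gate dropout, replacing the flashback mechanism removed above: with
+ # probability gate_dropout_proba, an NxHxR recurrence timeline is
+ # "killed" from a uniformly drawn time step onward (the use of the
+ # resulting mask comes below).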
+ if self.training and self.gate_dropout_proba > 0.0:
+     # G is NxHxRxT, where the R dimension indexes the caterpillar's rows.
+
+     warnings.warn("gate dropout", RuntimeWarning)
+
+     if self.gate_dropout_sync:
+         shape_kill = (N, 1, 1)
+     else:
+         shape_kill = (N, H, R)
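+     # When gate_dropout_sync is set, the trailing (1, 1) dims broadcast:
+     # each batch element then shares one kill pattern across all H heads
+     # and R rows; otherwise every (n, h, r) timeline is independent.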
+
+     # Pick a point in each of the NxHxR timelines and set this
+     # entry and all the following ones to 1
+     kill = (
+         torch.rand(*shape_kill, t1 - t0, device=G.device).sort(dim=3).indices
+         == 0
+     ).cumsum(dim=3)
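+     # The sort trick above: along dim 3, .sort().indices is a random
+     # permutation of 0 .. t1-t0-1, so "== 0" is True at exactly one
+     # uniformly distributed position per timeline, and the cumsum turns
+     # that one-hot into a step mask, e.g. [0, 0, 1, 1, 1] for t1-t0 = 5.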
+
+     # Keep the kill mask only for a randomly chosen subset of the
+     # NxHxR timelines, each selected with probability gate_dropout_proba
+     kill = kill * (
+         torch.rand(*shape_kill, 1, device=G.device) <= self.gate_dropout_proba