######################################################################

2024 Jan 07 21:37:48 (from mygpt.py)

# This is one order of magnitude more complicated than I expected, not
# elegant, slow, hopefully not buggy
import torch


def flash_back_time_src(N, H, t0, t1, CL, CH, proba, device):
    # starting flash backs
    fb_start = (torch.rand(N, CH, t1 - t0, device=device) <= proba).long()
    fb_start[:, :, -CL:] = 0
    fb_start[:, :, :CL] = 0

    # Remove series longer than CL
    fb_body = fb_start.clone()
    fb_body[:, :, CL + 1 :] -= fb_start[:, :, : -(CL + 1)]
    fb_body = fb_body.cumsum(dim=2)
    fb_start = fb_start * (fb_body == 1)
    # Set an origin source time (the starting time of the chunk to
    # copy here). We set it as the current time minus a multiple of
    # CL to be consistent with the "rolling" caterpillar
    t = torch.arange(fb_start.size(2), device=fb_start.device)[None, None, :]
    src_time = fb_start * (
        t
        - CL
        * (
            1
            + (
                torch.rand(fb_start.size(), device=fb_start.device) * (t // CL - 1)
            ).long()
        )
    )
    src_time[:, :, CL:] -= src_time.clone()[:, :, :-CL]
    src_time = src_time.cumsum(dim=2)
    src_head = fb_start * torch.randint(H, fb_start.size(), device=fb_start.device)
    src_head[:, :, CL:] -= src_head.clone()[:, :, :-CL]
    src_head = src_head.cumsum(dim=2)

    src_delta = fb_start.clone()
    src_delta[:, :, CL:] -= fb_start[:, :, :-CL]
    src_delta = src_delta.cumsum(dim=2)
    src_delta[:, :, CL:] -= CL * fb_start[:, :, :-CL]
    src_time += src_delta.cumsum(dim=2) - 1

    return src_time, src_head
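
# A toy check of the run-capping idiom above (fb_body's shifted
# cumsum): with CL = 3, the start at index 3 falls within CL of the
# start at index 1 and is removed, while the start at index 7
# survives. The values are made up purely for illustration.
import torch

CL = 3
x = torch.tensor([[[0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]]])
body = x.clone()
body[:, :, CL + 1 :] -= x[:, :, : -(CL + 1)]
body = body.cumsum(dim=2)
print(x * (body == 1))  # tensor([[[0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]])
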
def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
    N, H, CH = V.size(0), V.size(1), rec_V.size(1)

    fbt, fbh = flash_back_time_src(N, H, t0, t1, CL, CH, proba, rec_V.device)

    fbt_V = fbt[:, :, :, None]
    fbh_V = fbh[:, :, :, None]
    t = fbt_V.clamp(min=0)
    n = torch.arange(V.size(0), device=V.device)[:, None, None, None]
    d = torch.arange(V.size(3), device=V.device)[None, None, None, :]
    q = V[:, :, t0:t1][n, fbh_V, t, d]
    rec_V[:, :, t0:t1] = q * (fbt_V >= 0) + rec_V[:, :, t0:t1] * (fbt_V < 0)

    fbt_K = fbt[:, :, :, None]
    fbh_K = fbh[:, :, :, None]
    t = fbt_K.clamp(min=0)
    n = torch.arange(K.size(0), device=K.device)[:, None, None, None]
    d = torch.arange(K.size(3), device=K.device)[None, None, None, :]
    q = K[:, :, t0:t1][n, fbh_K, t, d]
    rec_K[:, :, t0:t1] = q * (fbt_K >= 0) + rec_K[:, :, t0:t1] * (fbt_K < 0)
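
# A minimal smoke test for the two functions above. The sizes and the
# (N, H, T, D) layout for V / K and (N, CH, T, D) layout for the
# recurrent buffers are assumptions made up for this check, not values
# taken from the model.
import torch

N, H, CH, T, D, CL = 2, 4, 4, 32, 8, 4
V, K = torch.randn(N, H, T, D), torch.randn(N, H, T, D)
rec_V, rec_K = torch.zeros(N, CH, T, D), torch.zeros(N, CH, T, D)
insert_flash_back(rec_V, V, rec_K, K, t0=0, t1=T, CL=CL, proba=0.1)
print(rec_V.abs().sum() > 0)  # some chunks were copied in (with high probability)
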
######################################################################


######################################################################

2024 Jan 07 21:38:11 (from mygpt.py)

# insert_flash_back(self.rec_V, V, self.rec_K, K, t0, t1, CL, proba=self.proba_flashback / CL)

2024 Jan 09 14:24:42 (from mygpt.py)

# This piece of code makes the assumption that there is
# nothing informative before t0, otherwise we'd have to
# implement a cache for V and K too. This should not be
# too much of a problem since this is used only during
# training, where full sequences are available

# n = torch.arange(N, device=X.device)[:, None, None, None]
# t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
# dv = torch.arange(DV, device=X.device)[None, None, None, :]
# dk = torch.arange(DK, device=X.device)[None, None, None, :]

# u = (
#     torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
# ) * CL

# src_time = t - u - t0
# src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)

# mask = (
#     torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
# ).long()

# self.rec_V[:, :, t0:t1] = (
#     mask * V[n, src_head, src_time, dv]
#     + (1 - mask) * self.rec_V[:, :, t0:t1]
# )

# self.rec_K[:, :, t0:t1] = (
#     mask * K[n, src_head, src_time, dk]
#     + (1 - mask) * self.rec_K[:, :, t0:t1]
# )

######################################################################

2024 Jan 10 08:10:39 (from mygpt.py)

# That was a bad idea
# G = F.dropout(G, self.attention_dropout, self.training)

######################################################################

2024 Jan 10 08:46:13 (from mygpt.py)

#################################################################
# Flashbacks. This version sucks, about to replace it
if self.training and self.proba_flashback > 0.0:
    warnings.warn("flash back", RuntimeWarning)
    # This piece of code makes the assumption that there is
    # nothing informative before t0, otherwise we'd have to
    # implement a cache for V and K too. This should not be
    # too much of a problem since this is used only during
    # training, where full sequences are available

    n = torch.arange(N, device=X.device)[:, None, None, None]
    t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
    dv = torch.arange(DV, device=X.device)[None, None, None, :]
    dk = torch.arange(DK, device=X.device)[None, None, None, :]

    u = (
        torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
    ) * CL

    src_time = t - u - t0
    src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)

    mask = (
        torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
    ).long()

    self.rec_V[:, :, t0:t1] = (
        mask * V[n, src_head, src_time, dv]
        + (1 - mask) * self.rec_V[:, :, t0:t1]
    )

    self.rec_K[:, :, t0:t1] = (
        mask * K[n, src_head, src_time, dk]
        + (1 - mask) * self.rec_K[:, :, t0:t1]
    )
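
# The mask-mixing idiom above, `mask * new + (1 - mask) * old`, is the
# arithmetic equivalent of torch.where on a boolean mask; a toy check
# with stand-in tensors:
import torch

old, new = torch.zeros(2, 3), torch.ones(2, 3)
mask = (torch.rand(2, 3) <= 0.5).long()
assert torch.equal(mask * new + (1 - mask) * old, torch.where(mask.bool(), new, old))
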
######################################################################

2024 Jan 13 13:38:31 (from mygpt.py)

g = F.sigmoid(self.b_G)

print(f"\n\nSANITY {a**T}\n")

######################################################################

2024 Jan 14 13:39:37 (from mygpt.py)

dropout_head = (
    (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
    .expand_as(G)
    .float()
)

dropout_tail = dropout_head.cumsum(dim=3) - dropout_head

dropout_active = (
    torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
).long()

dropout_head *= dropout_active
dropout_tail *= dropout_active

G = (
    G
    + dropout_head * (1 - epsilon - G.detach())
    - dropout_tail * G.detach()
)
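
# A toy check of the construction above: sort(...).indices == 0 marks
# one uniformly drawn position along the time axis (the "head"), and
# the cumsum minus the head marks every position strictly after it
# (the "tail"). The sizes are stand-ins, not the real N, H, G.
import torch

head = (torch.rand(1, 1, 1, 8).sort(dim=3).indices == 0).float()
tail = head.cumsum(dim=3) - head
print(head.squeeze())  # e.g. tensor([0., 0., 1., 0., 0., 0., 0., 0.])
print(tail.squeeze())  # e.g. tensor([0., 0., 0., 1., 1., 1., 1., 1.])
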
######################################################################

2024 Jan 18 07:39:29 (from mygpt.py)

class Calibrator:
    def __init__(self, w=None, b=None):
        self.w = w
        self.b = b
        self.s, self.s_sq, self.n = 0, 0, 0
        self.mean, self.std = 0, 0

    def update(self, X):
        X = X.detach()
        self.s += X.sum(dim=0)
        self.s_sq += X.pow(2).sum(dim=0)
        self.n += X.size(0)

    def moments(self):
        mean = self.s / self.n
        std = (self.s_sq / self.n - mean * mean).sqrt()
        return mean, std

    def normalize(self):
        mean, std = self.moments()
        if self.b is not None:
            self.b.sub_(mean)
        if self.w is not None:
            self.w.div_(std)
        result = mean - self.mean, std - self.std
        self.mean, self.std = mean, std
        self.s, self.s_sq, self.n = 0, 0, 0
        return result
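
# A hypothetical usage sketch of the class above: accumulate the
# moments of a stream of activation batches, then read the drift
# returned by normalize(), which also resets the accumulators. The
# sizes are made up.
import torch

calib = Calibrator()
for _ in range(10):
    calib.update(torch.randn(128, 16))
mean, std = calib.moments()  # per-dimension running mean / std
d_mean, d_std = calib.normalize()  # drift since the previous call
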
######################################################################

2024 Jan 18 07:39:34 (from mygpt.py)

# self.calibrator_G = Calibrator()
# self.calibrator_rec_V = Calibrator()
# self.calibrator_rec_K = Calibrator()

######################################################################

2024 Jan 18 07:39:37 (from mygpt.py)

# self.calibrator_G.update(G.reshape(-1, G.size(-1)))

######################################################################

2024 Jan 18 07:39:42 (from mygpt.py)

# self.calibrator_rec_V.update(
#     next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
# )
# self.calibrator_rec_K.update(
#     next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
# )

######################################################################

2024 Jan 18 07:47:12 (from mygpt.py)

######################################################################
# Roll the gating indexes

# warnings.warn("rotating barrel", RuntimeWarning)

# r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
# t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
# r_barrel = (r_barrel + (t_barrel + t0) // L) % R
# G = G.gather(dim=2, index=r_barrel.expand_as(G))
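
# A toy check of the gather-based rotation above: with R rows rotated
# by one every L time steps, the value read at row r and time t comes
# from row (r + t // L) % R. The sizes are stand-ins.
import torch

R, T, L = 3, 8, 2
G = torch.arange(R).float()[None, None, :, None].expand(1, 1, R, T)
r = torch.arange(R)[None, None, :, None]
t = torch.arange(T)[None, None, None, :]
print(G.gather(dim=2, index=((r + t // L) % R).expand_as(G)))
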
######################################################################

2024 Jan 18 07:47:25 (from mygpt.py)

# warnings.warn("harmonic recurrence", RuntimeWarning)
# har = torch.arange(t0, t1, device=G.device).float() + 1
# A = har / (har + 1)
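
# A side note on the coefficients above: a recurrence gated with
# A_i = i / (i + 1) computes a running mean, which is presumably the
# point of the "harmonic" schedule. A sketch with a zero-based index
# rather than the t0-shifted one above:
import torch

x = torch.randn(10)
i = torch.arange(10).float()
A = i / (i + 1)
y = torch.zeros(())
for k in range(10):
    y = A[k] * y + (1 - A[k]) * x[k]
assert torch.allclose(y, x.mean())
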
######################################################################

2024 Jan 18 08:46:18 (from mygpt.py)

# warnings.warn("softmax gating", RuntimeWarning)

# G = (
#     torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
# ).softmax(dim=2)
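
# A shape check for the einsum above, with stand-in sizes: X is
# (N, T, C), w_G is (H, R, C), b_G is (H, R), and the gates come out
# as (N, H, R, T); a softmax over dim=2 normalizes them across the R
# rows instead of squashing each one independently with a sigmoid.
import torch

N, T, C, H, R = 2, 5, 16, 4, 3
X, w_G, b_G = torch.randn(N, T, C), torch.randn(H, R, C), torch.randn(H, R)
G = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).softmax(dim=2)
print(G.shape)  # torch.Size([2, 4, 3, 5])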