X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=eda8685b8a58653d6739cf4016f412528db378ce;hb=037adb139441f40078421cd40f6aad1748c2724d;hp=7105e97c351138b6dbb9e382d99c29293b1f594c;hpb=b458d5aa1f2ba736807e87b65ccad2f96b216a10;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 7105e97..eda8685 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -442,7 +442,8 @@ class KVRec(nn.Module):
 
 
 # Returns a tensor with an additional index at rank win_dim, that moves
-# along the same dimension as dim, on a domain {0...win_size-1}
+# along the same dimension as dim, on a domain {0...win_size-1}, and
+# dim is restricted to a domain reduced by win_size-1 values.
 
 
 def moving_window(x, dim, win_dim, win_size):
@@ -480,6 +481,9 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
+        warnings.warn("flash back", RuntimeWarning)
+        self.proba_flashback = 0.1
+
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
             torch.full(
@@ -511,9 +515,10 @@ class Caterpillar(nn.Module):
         N = bs.x.size(0)
         T = bs.x.size(1)
+        H = self.w_V.size(0)
         DV = self.w_V.size(1)
         DK = self.w_K.size(1)
-        Dout = self.w_O.size(1)
+        DM = self.w_O.size(1)
         CH = self.caterpillar_height
         CL = self.caterpillar_length
 
@@ -521,6 +526,8 @@
             t0 >= CL and (t1 - t0) % CL == 0
         ), "bs.first should be at least caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
 
+        # We cache values to deal efficiently with auto-regression
+
         if bs.init_cache:
             self.rec_V = X.new_zeros(N, CH, T, DV)
             self.rec_K = X.new_zeros(N, CH, T, DK)
@@ -529,7 +536,7 @@ class Caterpillar(nn.Module):
             self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
             self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
 
-            self.cache_Y = X.new_zeros(N, T, Dout)
+            self.cache_Y = X.new_zeros(N, T, DM)
 
         ######################################################################
         # Compute the recurrent state
@@ -537,13 +544,16 @@ class Caterpillar(nn.Module):
         # This is the Gating sequence that modulates the storing of
         # the new key and value in the CH pairs of the current
         # stack. The CH gating values are independent, which means
-        # that the current K/V could be stored in all the pairs of the
+        # that the current K/V could be stored in multiple pairs of the
         # recurrent state, or not at all.
 
         G = (
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
+        # That was a bad idea
+        # G = F.dropout(G, self.attention_dropout, self.training)
+
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
         K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
 
@@ -560,7 +570,7 @@ class Caterpillar(nn.Module):
         # by updating that at time t-L, the parallel scan operates
         # with a period of L. To do so we split the time indexing in
         # two axes, the second of size CL, and run the parallel scan
-        # using the other alone as the sequence index.
+        # using the other as the sequence index.
 
         A = A.unflatten(2, (-1, CL))
         gated_V = gated_V.unflatten(2, (-1, CL))
@@ -574,6 +584,43 @@
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
+        if self.training and self.proba_flashback > 0.0:
+            # insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
+
+            # This piece of code makes the assumption that there is
+            # nothing informative before t0, otherwise we'd have to
+            # implement a cache for V and K too. This should not be
+            # too much of a problem since this is used only during
+            # training, where full sequences are available.
+
+            n = torch.arange(N, device=X.device)[:, None, None, None]
+            t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+            dv = torch.arange(DV, device=X.device)[None, None, None, :]
+            dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+            u = (
+                torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+            ) * CL
+
+            src_time = t - u - t0
+            src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+            mask_V = (
+                torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+            ).long()
+            self.rec_V[:, :, t0:t1] = (
+                mask_V * V[n, src_head, src_time, dv]
+                + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+            )
+
+            mask_K = (
+                torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback
+            ).long()
+            self.rec_K[:, :, t0:t1] = (
+                mask_K * K[n, src_head, src_time, dk]
+                + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+            )
+
         ######################################################################
         # compute the readout
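
The comment on moving_window fixed in the first hunk describes its contract: a new index of size win_size appears at rank win_dim, and dim shrinks by win_size - 1 positions. The patch does not show moving_window's body; the following is a minimal sketch of the same behavior using torch.Tensor.unfold, not the repository's implementation:

    import torch

    # Windows of size 3 over a length-6 axis: the windowed axis keeps
    # 6 - 3 + 1 = 4 positions and a new size-3 window index appears.
    x = torch.arange(6)
    w = x.unfold(0, 3, 1)
    print(w.shape)  # torch.Size([4, 3])
    print(w)        # rows [0,1,2], [1,2,3], [2,3,4], [3,4,5]

unfold always appends the window index as the last dimension, whereas moving_window lets the caller place it at an arbitrary rank win_dim.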
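The hunk at @@ -560 refines the comment on the period-CL scan: time is folded into (T // CL, CL) so that a recurrence relating step t to step t - CL becomes an ordinary step-1 recurrence along the folded axis. A sketch with a naive loop standing in for the parallel scan used in the file; names and shapes here are illustrative, not the module's:

    import torch

    N, CH, T, CL, D = 2, 3, 8, 4, 5
    A = torch.rand(N, CH, T)        # per-step gates
    V = torch.randn(N, CH, T, D)    # per-step inputs

    A = A.unflatten(2, (-1, CL))    # (N, CH, T//CL, CL)
    V = V.unflatten(2, (-1, CL))    # (N, CH, T//CL, CL, D)

    # Recurrence X[s] = A[s] * X[s-1] + V[s] along the folded axis,
    # i.e. X[t] = A[t] * X[t-CL] + V[t] in the original time index.
    out = [V[:, :, 0]]
    for s in range(1, A.size(2)):
        out.append(A[:, :, s, :, None] * out[-1] + V[:, :, s])
    out = torch.stack(out, dim=2).flatten(2, 3)  # back to (N, CH, T, D)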
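The flashback block added at @@ -574 perturbs the recurrent state during training: each coefficient of rec_V and rec_K is, with probability proba_flashback, overwritten by a raw V/K value taken from a random earlier time step (a multiple of CL back) and a random head. A self-contained sketch of the per-coefficient masking pattern; the shapes and names are hypothetical stand-ins for the module's:

    import torch

    N, CH, L, DV, proba = 2, 3, 4, 5, 0.1

    rec_V = torch.zeros(N, CH, L, DV)  # stands in for rec_V[:, :, t0:t1]
    V_src = torch.randn(N, CH, L, DV)  # stands in for V[n, src_head, src_time, dv]

    # Bernoulli mask per coefficient: 1 selects the flashback value,
    # 0 keeps the current recurrent value.
    mask = (torch.rand(N, CH, L, DV) <= proba).long()
    rec_V = mask * V_src + (1 - mask) * rec_V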