X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=de69a755f9510daff3669c252fb14a8bec4b3148;hb=f3f490def0be8a3ea2b9a0ac60f5bb33c5c45fb5;hp=7cecc225a088f2e734edcbe590b31ef6d84f16ef;hpb=be092b9d616934edddef63556ad133e9ad9aaf2b;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 7cecc22..de69a75 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -181,7 +181,7 @@ def nsum_shape(X, Y_init):
 class DumbRec(nn.Module):
     def __init__(
         self,
-        dim_in,
+        dim_model,
         dim_qk,
         dim_v,
         nb_heads,
@@ -199,11 +199,11 @@ class DumbRec(nn.Module):
 
         self.k_star = randw(nb_lines, dim_qk)
 
-        self.w_qw = randw(nb_heads, dim_qk, dim_in)
-        self.w_qr = randw(nb_heads, dim_qk, dim_in)
-        # self.w_k = randw(nb_heads, dim_qk, dim_in)
-        self.w_v = randw(nb_heads, dim_v, dim_in)
-        self.w_o = randw(dim_v * nb_heads, dim_in)
+        self.w_qw = randw(nb_heads, dim_qk, dim_model)
+        self.w_qr = randw(nb_heads, dim_qk, dim_model)
+        # self.w_k = randw(nb_heads, dim_qk, dim_model)
+        self.w_v = randw(nb_heads, dim_v, dim_model)
+        self.w_o = randw(dim_v * nb_heads, dim_model)
 
     def reset_inner_loss(self):
         self.acc_attention = 0
@@ -310,7 +310,7 @@ class DumbRec(nn.Module):
 class KVRec(nn.Module):
     def __init__(
         self,
-        dim_in,
+        dim_model,
         dim_qk,
         dim_v,
         nb_heads,
@@ -328,11 +328,11 @@ class KVRec(nn.Module):
 
         self.k_star = randw(nb_lines, dim_qk)
 
-        self.w_qw = randw(nb_heads, dim_qk, dim_in)
-        self.w_qr = randw(nb_heads, dim_qk, dim_in)
-        self.w_k = randw(nb_heads, dim_qk, dim_in)
-        self.w_v = randw(nb_heads, dim_v, dim_in)
-        self.w_o = randw(dim_v * nb_heads, dim_in)
+        self.w_qw = randw(nb_heads, dim_qk, dim_model)
+        self.w_qr = randw(nb_heads, dim_qk, dim_model)
+        self.w_k = randw(nb_heads, dim_qk, dim_model)
+        self.w_v = randw(nb_heads, dim_v, dim_model)
+        self.w_o = randw(dim_v * nb_heads, dim_model)
 
     def reset_inner_loss(self):
         self.acc_attention = 0
@@ -441,6 +441,11 @@ class KVRec(nn.Module):
 
 ##############################
 
+# Returns a tensor with an additional index at rank win_dim, that moves
+# along the same dimension as dim, over the domain {0...win_size-1}, while
+# dim is restricted to a domain reduced by win_size-1 values.
+
+
 def moving_window(x, dim, win_dim, win_size):
     size, stride = x.size(), x.stride()
     size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
@@ -456,7 +461,7 @@ def moving_window(x, dim, win_dim, win_size):
 class Caterpillar(nn.Module):
     def __init__(
         self,
-        dim_in,
+        dim_model,
         dim_qk,
         dim_v,
         nb_heads,
@@ -476,17 +481,20 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.w_G = randw(nb_heads, caterpillar_height, dim_in)
+        warnings.warn("flash back", RuntimeWarning)
+        self.proba_flashback = 0.1
+
+        self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
             torch.full(
                 (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
             )
         )
 
-        self.w_K = randw(nb_heads, dim_qk, dim_in)
-        self.w_V = randw(nb_heads, dim_v, dim_in)
-        self.w_Q = randw(nb_heads, dim_qk, dim_in)
-        self.w_O = randw(dim_v * nb_heads, dim_in)
+        self.w_K = randw(nb_heads, dim_qk, dim_model)
+        self.w_V = randw(nb_heads, dim_v, dim_model)
+        self.w_Q = randw(nb_heads, dim_qk, dim_model)
+        self.w_O = randw(dim_v * nb_heads, dim_model)
 
         self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
         self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
@@ -507,9 +515,10 @@ class Caterpillar(nn.Module):
 
         N = bs.x.size(0)
         T = bs.x.size(1)
+        H = self.w_V.size(0)
         DV = self.w_V.size(1)
         DK = self.w_K.size(1)
-        Dout = self.w_O.size(1)
+        DM = self.w_O.size(1)
         CH = self.caterpillar_height
         CL = self.caterpillar_length
 
@@ -517,6 +526,8 @@ class Caterpillar(nn.Module):
             t0 >= CL and (t1 - t0) % CL == 0
         ), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
 
+        # We cache values to deal efficiently with auto-regression
+
         if bs.init_cache:
             self.rec_V = X.new_zeros(N, CH, T, DV)
             self.rec_K = X.new_zeros(N, CH, T, DK)
@@ -525,21 +536,24 @@ class Caterpillar(nn.Module):
             self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
             self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
 
-            self.cache_Y = X.new_zeros(N, T, Dout)
+            self.cache_Y = X.new_zeros(N, T, DM)
 
         ######################################################################
         # Compute the recurrent state
 
-        # This is the Gating sequence that modulates if they key and
-        # values should be stored in one of the CH pairs of the
-        # current stack. The CH gating values are independent, which
-        # means that the same thing could be stored up to CH times or
-        # not at all
+        # This is the Gating sequence that modulates the storing of
+        # the new key and value in the CH pairs of the current
+        # stack. The CH gating values are independent, which means
+        # that the current K/V could be stored in multiple pairs of the
+        # recurrent state, or not at all.
 
         G = (
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
+        # That was a bad idea
+        # G = F.dropout(G, self.attention_dropout, self.training)
+
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
         K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
 
@@ -552,10 +566,11 @@ class Caterpillar(nn.Module):
         init_rec_V = self.rec_V[:, :, t0 - CL : t0]
         init_rec_K = self.rec_K[:, :, t0 - CL : t0]
 
-        # Here there is a trick: The parallel scan operates with a
-        # period of L, so we split the sequence indexing in two axes,
-        # the second of size CL, and run the parallel scan using the
-        # other alone as the sequence index.
+        # Here there is a trick: Since the stack at time t is computed
+        # by updating that at time t-CL, the parallel scan operates
+        # with a period of CL. To do so we split the time indexing in
+        # two axes, the second of size CL, and run the parallel scan
+        # using the other as the sequence index.
 
         A = A.unflatten(2, (-1, CL))
         gated_V = gated_V.unflatten(2, (-1, CL))
@@ -569,6 +584,41 @@ class Caterpillar(nn.Module):
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
+        if self.training and self.proba_flashback > 0.0:
+            # This piece of code makes the assumption that there is
+            # nothing informative before t0, otherwise we'd have to
+            # implement a cache for V and K too. This should not be
+            # too much of a problem since this is used only during
+            # training, where full sequences are available
+
+            n = torch.arange(N, device=X.device)[:, None, None, None]
+            t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+            dv = torch.arange(DV, device=X.device)[None, None, None, :]
+            dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+            u = (
+                torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+            ) * CL
+
+            src_time = t - u - t0
+            src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+            mask_V = (
+                torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+            ).long()
+            self.rec_V[:, :, t0:t1] = (
+                mask_V * V[n, src_head, src_time, dv]
+                + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+            )
+
+            mask_K = (
+                torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback
+            ).long()
+            self.rec_K[:, :, t0:t1] = (
+                mask_K * K[n, src_head, src_time, dk]
+                + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+            )
+
         ######################################################################
         # compute the readout
 
@@ -622,7 +672,7 @@ class Caterpillar(nn.Module):
 class QKVAttention(nn.Module):
     def __init__(
         self,
-        dim_in,
+        dim_model,
         dim_qk,
         dim_v,
         nb_heads=1,
@@ -638,10 +688,10 @@ class QKVAttention(nn.Module):
         self.attention_dropout = attention_dropout
         self.record_attention = False
 
-        self.w_q = randw(nb_heads, dim_qk, dim_in)
-        self.w_k = randw(nb_heads, dim_qk, dim_in)
-        self.w_v = randw(nb_heads, dim_v, dim_in)
-        self.w_o = randw(dim_v * nb_heads, dim_in)
+        self.w_q = randw(nb_heads, dim_qk, dim_model)
+        self.w_k = randw(nb_heads, dim_qk, dim_model)
+        self.w_v = randw(nb_heads, dim_v, dim_model)
+        self.w_o = randw(dim_v * nb_heads, dim_model)
 
     def forward(self, bs):
         x_q = bs.x
@@ -745,7 +795,7 @@ class MyGPT(nn.Module):
         def attlayer():
             if attention_layer == "mha":
                 return QKVAttention(
-                    dim_in=dim_model,
+                    dim_model=dim_model,
                     dim_qk=dim_keys,
                     dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
@@ -754,7 +804,7 @@ class MyGPT(nn.Module):
                 )
             elif attention_layer == "dumbrec":
                 return DumbRec(
-                    dim_in=dim_model,
+                    dim_model=dim_model,
                     dim_qk=dim_keys,
                     dim_v=dim_rec_v,
                     nb_heads=nb_heads,
@@ -763,7 +813,7 @@ class MyGPT(nn.Module):
                 )
             elif attention_layer == "kvrec":
                 return KVRec(
-                    dim_in=dim_model,
+                    dim_model=dim_model,
                     dim_qk=dim_keys,
                     dim_v=dim_rec_v,
                     nb_heads=nb_heads,
@@ -772,7 +822,7 @@ class MyGPT(nn.Module):
                 )
             elif attention_layer == "caterpillar":
                 return Caterpillar(
-                    dim_in=dim_model,
+                    dim_model=dim_model,
                     dim_qk=dim_keys,
                     dim_v=dim_rec_v,
                     nb_heads=nb_heads,
@@ -912,7 +962,7 @@ if __name__ == "__main__":
     print("Basic check.")
 
     m = Caterpillar(
-        dim_in=4,
+        dim_model=4,
         dim_qk=3,
         dim_v=7,
         nb_heads=1,
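
A minimal sketch of what the new moving_window comment describes: a
zero-copy strided view that inserts a window index at rank win_dim,
sliding along dim. Only the first lines of the body appear in the hunks
above; the as_strided completion below is an assumption consistent with
them, and the shapes are invented for illustration.

import torch


def moving_window(x, dim, win_dim, win_size):
    size, stride = x.size(), x.stride()
    size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
    # Assumed completion: insert the window axis and reuse dim's stride,
    # so every window is a view on the same storage.
    size = size[:win_dim] + (win_size,) + size[win_dim:]
    stride = stride[:win_dim] + (stride[dim],) + stride[win_dim:]
    return x.as_strided(size=size, stride=stride)


x = torch.arange(6)[None, :]  # (1, 6)
w = moving_window(x, dim=1, win_dim=2, win_size=3)
print(w.size())  # torch.Size([1, 4, 3])
print(w[0])      # rows are x[0, t : t + 3] for t = 0..3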
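
The rewritten "trick" comment can be made concrete with a toy reshape.
Since the stack at time t updates the one at time t-CL, the scan over
time decomposes into CL independent scans, one per offset modulo CL. In
the sketch below a cumulative sum stands in for the actual parallel
scan, and all sizes are invented.

import torch

N, CH, T, DV, CL = 2, 3, 8, 5, 4  # toy sizes, T a multiple of CL

gated_V = torch.randn(N, CH, T, DV)

# Split time into (T // CL, CL): moving along axis 2 now jumps by CL
# steps, so scanning over it has the required period.
gated_V = gated_V.unflatten(2, (-1, CL))  # (N, CH, T // CL, CL, DV)
next_V = gated_V.cumsum(dim=2)            # stand-in for the real scan
next_V = next_V.flatten(2, 3)             # back to (N, CH, T, DV)
print(next_V.size())                      # torch.Size([2, 3, 8, 5])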
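
The new "flashback" block perturbs the recurrent state during training
by overwriting random coefficients with raw values taken at random
earlier time steps and heads. The stripped-down sketch below keeps only
the Bernoulli mask-and-mix mechanism; the actual code additionally
restricts the source time to earlier multiples of CL, and every size
here is invented.

import torch

N, H, CH, T, DV = 2, 4, 3, 8, 5  # toy sizes
proba_flashback = 0.1

V = torch.randn(N, H, T, DV)       # raw per-head values
rec_V = torch.randn(N, CH, T, DV)  # recurrent state to perturb

n = torch.arange(N)[:, None, None, None]
dv = torch.arange(DV)[None, None, None, :]
src_time = torch.randint(T, (N, CH, T, 1))  # random source time step
src_head = torch.randint(H, (N, CH, T, 1))  # random source head

# Each coefficient is replaced with probability proba_flashback
mask = (torch.rand(N, CH, T, DV) <= proba_flashback).long()
rec_V = mask * V[n, src_head, src_time, dv] + (1 - mask) * rec_V
print(rec_V.size())  # torch.Size([2, 3, 8, 5])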