X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=d8fd227f63c39a70dded3c55f3c230c3a9d58862;hb=3e4af6d54fb3d7bd6794035cb79e30ecdcadeb6f;hp=d1acf22b1a6359cf2ffc31fe2d0885306674d6e4;hpb=ca56d3dfa53f3486da1d651f31f1e34ea0dc4652;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index d1acf22..d8fd227 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers that can replace the MHA.
+
 import math, warnings
 
 import torch, einops
@@ -37,7 +39,7 @@ import ffutils
 # 1 for the successive tokens.
 #
 # Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
 
 
 class BracketedSequence:
@@ -441,6 +443,11 @@ class KVRec(nn.Module):
 ##############################
 
 
+# Returns a tensor with an additional index at rank win_dim, that moves
+# along the same dimension as dim, over the domain {0...win_size-1}, with
+# dim restricted to a domain reduced by win_size-1 values.
+
+
 def moving_window(x, dim, win_dim, win_size):
     size, stride = x.size(), x.stride()
     size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
@@ -476,6 +483,8 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
+        self.proba_gate_dropout = 0.0
+
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
             torch.full(
@@ -507,9 +516,10 @@ class Caterpillar(nn.Module):
 
         N = bs.x.size(0)
         T = bs.x.size(1)
+        H = self.w_V.size(0)
         DV = self.w_V.size(1)
         DK = self.w_K.size(1)
-        Dout = self.w_O.size(1)
+        DM = self.w_O.size(1)
         CH = self.caterpillar_height
         CL = self.caterpillar_length
 
@@ -517,6 +527,8 @@ class Caterpillar(nn.Module):
             t0 >= CL and (t1 - t0) % CL == 0
         ), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
 
+        # We cache values to deal efficiently with auto-regression
+
         if bs.init_cache:
             self.rec_V = X.new_zeros(N, CH, T, DV)
             self.rec_K = X.new_zeros(N, CH, T, DK)
@@ -525,23 +537,28 @@ class Caterpillar(nn.Module):
             self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
             self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
 
-        self.cache_Y = X.new_zeros(N, T, Dout)
+        self.cache_Y = X.new_zeros(N, T, DM)
+
+        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
+        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
 
         ######################################################################
         # Compute the recurrent state
 
-        # This is the Gating sequence that modulates if they key and
-        # values should be stored in one of the CH pairs of the
-        # current stack. The CH gating values are independent, which
-        # means that the same thing could be stored up to CH times or
-        # not at all
+        # This is the Gating sequence that modulates the storing of
+        # the new key and value in the CH pairs of the current
+        # stack. There are CH independent gating values, which means
+        # that the current K/V may be stored in multiple pairs of the
+        # recurrent state, or not at all.
         G = (
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
-        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+        # Renormalize the gating so that the total gating applied to a
+        # row, summed over the heads, cannot exceed 1
+
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
 
         # We prepare the arguments for the parallel scan
 
@@ -549,13 +566,25 @@ class Caterpillar(nn.Module):
         gated_V = torch.einsum("nhet,nhtd->netd", G, V)
         gated_K = torch.einsum("nhet,nhtd->netd", G, K)
 
+        # We start from cached values, which matters during inference
+
         init_rec_V = self.rec_V[:, :, t0 - CL : t0]
         init_rec_K = self.rec_K[:, :, t0 - CL : t0]
 
-        # Here there is a trick: The parallel scan operates with a
-        # period of L, so we split the sequence indexing in two axes,
-        # the second of size CL, and run the parallel scan using the
-        # other alone as the sequence index.
+        ######################################################################
+
+        if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate dropout", RuntimeWarning)
+            epsilon = 0.5
+
+        ######################################################################
+        # Associative scan
+
+        # Here there is a trick: Since the stack at position t is
+        # computed by updating that at position t-CL, the parallel
+        # scan operates with a period of CL. To do so we split the
+        # sequence indexing into two axes, the second of size CL, and
+        # run the parallel scan using the first as the sequence index.
 
         A = A.unflatten(2, (-1, CL))
         gated_V = gated_V.unflatten(2, (-1, CL))
@@ -564,8 +593,6 @@ class Caterpillar(nn.Module):
         next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
         next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
 
-        # Put back the sequence index
-
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
@@ -715,7 +742,6 @@ class MyGPT(nn.Module):
         nb_blocks,
         nb_lines=None,
         caterpillar_height=None,
-        dim_rec_v=-1,
         causal=False,
         dropout=0.0,
         len_max=1e5,
@@ -723,7 +749,12 @@ class MyGPT(nn.Module):
     ):
         super().__init__()
 
-        assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+        assert attention_layer in {
+            "mha",
+            "dumbrec",
+            "kvrec",
+            "caterpillar",
+        }, f"Unknown attention operator {attention_layer}."
 
         if attention_layer == "caterpillar":
             assert nb_lines % caterpillar_height == 0
@@ -756,7 +787,7 @@ class MyGPT(nn.Module):
                 return DumbRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
@@ -765,7 +796,7 @@ class MyGPT(nn.Module):
                 return KVRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
@@ -774,7 +805,7 @@ class MyGPT(nn.Module):
                 return Caterpillar(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     caterpillar_length=self.caterpillar_length,
                     caterpillar_height=self.caterpillar_height,
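
A minimal, self-contained sketch of the two mechanisms this patch adds to
Caterpillar.forward: the per-row gate renormalization, and the period-CL
reshaping behind the associative scan. Toy shapes and a simplified dimension
layout are assumed, and a plain sequential loop stands in for the repo's
pscan_dim; this is an illustration of the idea, not the actual implementation.

import torch

# (1) Gate renormalization: gates are rescaled only when their sum over
# heads exceeds 1, so the total write into any row stays <= 1.
# Toy layout: G[head, row] (the patch uses (N, H, CH, T) and sums dim 1).
G = torch.tensor([[0.9, 0.3], [0.8, 0.4]])
G = G / G.sum(0, keepdim=True).clamp(min=1)
# gates of row 0 summed to 1.7 -> rescaled to sum to 1;
# gates of row 1 summed to 0.7 -> left unchanged by clamp(min=1)

# (2) Period-CL recurrence: rec[t] = a[t] * rec[t - CL] + x[t] couples
# position t only to position t - CL, so folding time into (T // CL, CL)
# turns it into CL independent ordinary scans along the first new axis.
T, CL, D = 12, 3, 2  # toy: sequence length, caterpillar length, feature dim
a, x = torch.rand(T), torch.randn(T, D)
init = torch.randn(CL, D)  # plays the role of rec[t0 - CL : t0]

# Sequential reference over the flat time axis
rec = torch.empty(T, D)
for t in range(T):
    prev = init[t] if t < CL else rec[t - CL]
    rec[t] = a[t] * prev + x[t]

# Folded view: the scan now runs along axis 0 only, which is the axis
# pscan_dim parallelizes in the patch (in O(log(T / CL)) steps).
a2, x2 = a.unflatten(0, (T // CL, CL)), x.unflatten(0, (T // CL, CL))
out, carry = torch.empty_like(x2), init
for b in range(T // CL):
    carry = a2[b, :, None] * carry + x2[b]
    out[b] = carry

assert torch.allclose(rec, out.flatten(0, 1))

The assert confirms that flattening the two axes back recovers exactly the
stride-CL recurrence, which is why the patch can unflatten, scan, and then
flatten(2, 3) the result into rec_V / rec_K.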