X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=185df3870c1e0c8d35daf9f3e3257340d50fa1a7;hb=57332f677ef5ee535707c1b83a541aa0e79508e6;hp=0e94672777ad3df99016ffd0da855a927f8a6e6a;hpb=597f3a01de92be1a4f05114df11d9e39b3608e29;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 0e94672..185df38 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers that can replace the MHA.
+
 import math, warnings
 
 import torch, einops
@@ -37,7 +39,7 @@ import ffutils
 # 1 for the successive tokens.
 #
 # Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
 
 class BracketedSequence:
@@ -474,13 +476,17 @@ class Caterpillar(nn.Module):
 
         warnings.warn("Caterpillar", RuntimeWarning)
 
-        def randw(*d):
-            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
+        def randw(*d, amplitude=None):
+            if amplitude is None:
+                amplitude = 1 / math.sqrt(d[-1])
+            return nn.Parameter(amplitude * torch.randn(*d))
 
         self.caterpillar_length = caterpillar_length
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
+        self.proba_gate_dropout = 0.0
+
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
             torch.full(
@@ -493,8 +499,16 @@ class Caterpillar(nn.Module):
         self.w_Q = randw(nb_heads, dim_qk, dim_model)
         self.w_O = randw(dim_v * nb_heads, dim_model)
 
-        self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
-        self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
+        self.init_K_rec = randw(
+            caterpillar_height,
+            caterpillar_length,
+            dim_qk,
+        )
+        self.init_V_rec = randw(
+            caterpillar_height,
+            caterpillar_length,
+            dim_v,
+        )
 
     def reset_inner_loss(self):
         self.acc_attention = 0
@@ -512,6 +526,7 @@ class Caterpillar(nn.Module):
 
         N = bs.x.size(0)
         T = bs.x.size(1)
+        H = self.w_V.size(0)
         DV = self.w_V.size(1)
         DK = self.w_K.size(1)
         DM = self.w_O.size(1)
@@ -534,38 +549,80 @@ class Caterpillar(nn.Module):
 
             self.cache_Y = X.new_zeros(N, T, DM)
 
+        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
+        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+
         ######################################################################
         # Compute the recurrent state
 
         # This is the Gating sequence that modulates the storing of
         # the new key and value in the CH pairs of the current
-        # stack. The CH gating values are independent, which means
-        # that the current K/V could be stored in all the pairs of the
+        # stack. There are CH independent gating values, which means
+        # that the current K/V may be stored in multiple pairs of the
         # recurrent state, or not at all.
 
         G = (
-            torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
+            torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        G = F.dropout(G, self.attention_dropout, self.training)
+        ######################################################################
+        # The "flashbacks"
 
-        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
-        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+        if self.training and self.proba_gate_dropout > 0.0:
+            # This is a better implementation of "flashbacks".
+
+            # G is NxHxRxT, where r indexes the rows of the caterpillar.
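+            # A note on the mechanism below: dropout_head is a one-hot
+            # indicator over the time axis, marking a single step drawn
+            # uniformly in [t0, t1) for each (n, h) (the sort().indices
+            # == 0 trick samples that position), and dropout_tail marks
+            # every step after it. For the samples where the dropout is
+            # active, the straight-through update further down sets the
+            # gate to 1 - epsilon at the chosen step and to 0 afterwards,
+            # while the G.detach() terms let the gradient flow as if G
+            # were unmodified.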
+
+            warnings.warn("gate dropout", RuntimeWarning)
+            epsilon = 0.5
+
+            dropout_head = (
+                (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
+                .expand_as(G)
+                .float()
+            )
+
+            dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+
+            dropout_active = (
+                torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
+            ).long()
+
+            dropout_head *= dropout_active
+            dropout_tail *= dropout_active
+
+            G = (
+                G
+                #
+                + dropout_head * (1 - epsilon - G.detach())
+                - dropout_tail * G.detach()
+            )
+
+        ######################################################################
 
         # We prepare the arguments for the parallel scan
 
+        # Renormalize the gating so that its sum over the heads never
+        # exceeds 1 when several heads target the same row
+
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
+
         A = 1 - G.sum(1)
 
-        gated_V = torch.einsum("nhet,nhtd->netd", G, V)
-        gated_K = torch.einsum("nhet,nhtd->netd", G, K)
+        gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
+        gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
+
+        # We start from the cached values, which matters during inference
 
         init_rec_V = self.rec_V[:, :, t0 - CL : t0]
         init_rec_K = self.rec_K[:, :, t0 - CL : t0]
 
-        # Here there is a trick: Since the stack at time t is computed
-        # by updating that at time t-L, the parallel scan operates
-        # with a period of L. To do so we split the time indexing in
-        # two axes, the second of size CL, and run the parallel scan
-        # using the other as the sequence index.
+        #################################################################
+        # Associative scan
+
+        # Here there is a trick: since the stack at position t is
+        # computed by updating the one at position t-CL, the parallel
+        # scan operates with a period of CL. To do so we split the
+        # sequence index into two axes, the second of size CL, and
+        # run the parallel scan using the first as the sequence index.
 
         A = A.unflatten(2, (-1, CL))
         gated_V = gated_V.unflatten(2, (-1, CL))
@@ -574,8 +631,6 @@ class Caterpillar(nn.Module):
         next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
         next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
 
-        # Put back the sequence index
-
         self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
@@ -725,7 +780,6 @@ class MyGPT(nn.Module):
         nb_blocks,
         nb_lines=None,
         caterpillar_height=None,
-        dim_rec_v=-1,
         causal=False,
         dropout=0.0,
         len_max=1e5,
@@ -733,7 +787,12 @@ class MyGPT(nn.Module):
     ):
         super().__init__()
 
-        assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+        assert attention_layer in {
+            "mha",
+            "dumbrec",
+            "kvrec",
+            "caterpillar",
+        }, f"Unknown attention operator {attention_layer}."
 
         if attention_layer == "caterpillar":
             assert nb_lines % caterpillar_height == 0
@@ -766,7 +825,7 @@ class MyGPT(nn.Module):
             return DumbRec(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 nb_lines=nb_lines,
                 attention_dropout=dropout,
@@ -775,7 +834,7 @@ class MyGPT(nn.Module):
             return KVRec(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 nb_lines=nb_lines,
                 attention_dropout=dropout,
@@ -784,7 +843,7 @@ class MyGPT(nn.Module):
             return Caterpillar(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 caterpillar_length=self.caterpillar_length,
                 caterpillar_height=self.caterpillar_height,
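
######################################################################

# A minimal standalone sketch, not part of the patch above, to check
# the period-CL trick used around pscan_dim: the recurrence is
# rec[t] = A[t] * rec[t - CL] + gated_V[t], and viewing the time axis
# as (T // CL, CL) turns it into an ordinary first-order scan along
# the chunk axis, one independent scan per offset inside a chunk. The
# sizes below are hypothetical, and the sequential loop stands in for
# the parallel scan.

import torch

T, CL, D = 12, 3, 4  # sequence length, caterpillar length, feature dim
A = torch.rand(T)  # per-step multiplicative gates, in [0, 1]
V = torch.randn(T, D)  # per-step additive inputs (e.g. gated values)
init = torch.randn(CL, D)  # cached state for positions t0 - CL .. t0 - 1

# Reference: the recurrence computed naively, one step at a time
rec_ref = torch.empty(T, D)
for t in range(T):
    prev = init[t] if t < CL else rec_ref[t - CL]
    rec_ref[t] = A[t] * prev + V[t]

# The trick: split time into (T // CL, CL) and scan over the first axis
A2 = A.unflatten(0, (-1, CL))  # (T // CL, CL)
V2 = V.unflatten(0, (-1, CL))  # (T // CL, CL, D)
rec = torch.empty_like(V2)
state = init
for c in range(T // CL):  # pscan_dim performs this scan in parallel
    state = A2[c, :, None] * state + V2[c]
    rec[c] = state

assert torch.allclose(rec.flatten(0, 1), rec_ref)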