X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=bd870bc67ec1c8895abe4cd8c81d2b113e4666f9;hb=5c298b53859b4d97aa85331034af952aae3b0c05;hp=de69a755f9510daff3669c252fb14a8bec4b3148;hpb=f3f490def0be8a3ea2b9a0ac60f5bb33c5c45fb5;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index de69a75..bd870bc 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers to replace the MHA
+
 import math, warnings
 
 import torch, einops
@@ -37,7 +39,7 @@ import ffutils
 # 1 for the successive tokens.
 #
 # Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
 
 class BracketedSequence:
 
@@ -481,8 +483,8 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        warnings.warn("flash back", RuntimeWarning)
-        self.proba_flashback = 0.1
+        self.proba_flashback = 0.0
+        self.proba_gate_dropout = 0.0
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
@@ -551,7 +553,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # That bas a bad idea
+        if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate dropout", RuntimeWarning)
+            epsilon = 0.5
+
+        # That was a bad idea
         # G = F.dropout(G, self.attention_dropout, self.training)
 
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
@@ -559,6 +565,10 @@ class Caterpillar(nn.Module):
 
         # We prepare the arguments for the parallel scan
 
+        # Clip the gating
+        warnings.warn("gating clipping", RuntimeWarning)
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
+
         A = 1 - G.sum(1)
         gated_V = torch.einsum("nhet,nhtd->netd", G, V)
         gated_K = torch.einsum("nhet,nhtd->netd", G, K)
@@ -585,6 +595,7 @@ class Caterpillar(nn.Module):
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
         if self.training and self.proba_flashback > 0.0:
+            warnings.warn("flash back", RuntimeWarning)
             # This piece of code makes the assumption that there is
             # nothing informative before t0, otherwise we'd have to
             # implement a cache for V and K too. This should not be
@@ -603,20 +614,18 @@ class Caterpillar(nn.Module):
             src_time = t - u - t0
             src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
 
-            mask_V = (
+            mask = (
                 torch.rand(N, CH, t1 - t0, DV, device=X.device)
                 <= self.proba_flashback
             ).long()
+
             self.rec_V[:, :, t0:t1] = (
-                mask_V * V[n, src_head, src_time, dv]
-                + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+                mask * V[n, src_head, src_time, dv]
+                + (1 - mask) * self.rec_V[:, :, t0:t1]
             )
-            mask_K = (
-                torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback
-            ).long()
             self.rec_K[:, :, t0:t1] = (
-                mask_K * K[n, src_head, src_time, dk]
-                + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+                mask * K[n, src_head, src_time, dk]
+                + (1 - mask) * self.rec_K[:, :, t0:t1]
             )
 
 ######################################################################
@@ -773,7 +782,12 @@ class MyGPT(nn.Module):
     ):
         super().__init__()
 
-        assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+        assert attention_layer in {
+            "mha",
+            "dumbrec",
+            "kvrec",
+            "caterpillar",
+        }, f"Unknown attention operator {attention_layer}."
 
         if attention_layer == "caterpillar":
             assert nb_lines % caterpillar_height == 0
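
Note on the "gating clipping" hunk above: dividing G by its per-position sum over heads, clamped to a minimum of 1, rescales the gates only where the heads writing into the same recurrence line would sum to more than 1, so the forgetting coefficient A = 1 - G.sum(1) is kept in [0, 1]. The following standalone sketch is not part of the patch; the tensor sizes are illustrative assumptions, with dimensions ordered batch x heads x caterpillar lines x time, following the "nhet" einsum indices used in the diff.

import torch

# Illustrative sizes (assumed, not taken from the patch)
N, H, E, T = 2, 4, 3, 5

# Gates in (0, 1), as produced by the .sigmoid() in the patch
G = torch.rand(N, H, E, T)

# Gating clipping: for every (n, e, t), the gate sum over heads becomes <= 1;
# positions whose sum is already <= 1 are left untouched by clamp(min=1)
G = G / G.sum(1, keepdim=True).clamp(min=1)

# Forgetting coefficient of the recurrence, now guaranteed to lie in [0, 1]
A = 1 - G.sum(1)

assert (G.sum(1) <= 1 + 1e-6).all()
assert ((A >= -1e-6) & (A <= 1 + 1e-6)).all()

The clamp(min=1) rather than a plain division avoids amplifying gates whose sum is already below 1; only over-committed positions are rescaled.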