Update: disable the flashback path by default, add a gate-dropout hook and gating clipping to Caterpillar, and derive the recurrent value dimension from dim_model instead of dim_rec_v.
[mygptrnn.git] / mygpt.py
index f3c9a93..95e5527 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers to replace the MHA
+
 import math, warnings
 
 import torch, einops
@@ -481,8 +483,8 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        warnings.warn("flash back", RuntimeWarning)
-        self.proba_flashback = 1e-2
+        self.proba_flashback = 0.0
+        self.proba_gate_dropout = 0.0
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
@@ -551,7 +553,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # That bas a bad idea
+        if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate droupout", RuntimeWarning)
+            epsilon = 0.5
+
+        # That was a bad idea
         # G = F.dropout(G, self.attention_dropout, self.training)
 
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
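
A shape sketch of the gate computation in this hunk may help (the toy sizes below are assumptions, not values from the repository): X is (N, T, dim_model) and w_G is (nb_heads, caterpillar_height, dim_model), so the einsum followed by the sigmoid yields gates G of shape (N, nb_heads, caterpillar_height, T) with entries in (0, 1).

    import torch

    # Toy sizes, chosen only for illustration.
    N, T, dim_model, nb_heads, caterpillar_height = 2, 5, 8, 3, 4
    X = torch.randn(N, T, dim_model)
    w_G = torch.randn(nb_heads, caterpillar_height, dim_model)
    b_G = torch.zeros(nb_heads, caterpillar_height)

    # Same expression as in the hunk above: one gate per head, per line,
    # per time step, squashed into (0, 1) by the sigmoid.
    G = (
        torch.einsum("ntc,hec->nhet", X, w_G) + b_G[None, :, :, None]
    ).sigmoid()

    assert G.shape == (N, nb_heads, caterpillar_height, T)
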
@@ -559,6 +565,10 @@ class Caterpillar(nn.Module):
 
         # We prepare the arguments for the parallel scan
 
+        # Clip the gating
+        warnings.warn("gating clipping", RuntimeWarning)
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
+
         A = 1 - G.sum(1)
         gated_V = torch.einsum("nhet,nhtd->netd", G, V)
         gated_K = torch.einsum("nhet,nhtd->netd", G, K)
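
The clipping added here rescales the gates so that, for every line and time step, the total gate mass across heads is at most 1, which keeps the forget coefficient A = 1 - G.sum(1) within [0, 1]. A minimal numeric sketch (the tensor values are assumptions):

    import torch

    # One sample, two heads, one line, one time step; the raw gates sum to 1.5.
    G = torch.tensor([[[[0.75]], [[0.75]]]])
    G = G / G.sum(1, keepdim=True).clamp(min=1)  # rescaled to 0.5 each
    A = 1 - G.sum(1)                             # now 0.0 instead of -0.5

    assert (A >= 0).all() and (A <= 1).all()
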
@@ -585,6 +595,7 @@ class Caterpillar(nn.Module):
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
         if self.training and self.proba_flashback > 0.0:
+            warnings.warn("flash back", RuntimeWarning)
             # This piece of code makes the assumption that there is
             # nothing informative before t0, otherwise we'd have to
             # implement a cache for V and K too. This should not be
@@ -763,7 +774,6 @@ class MyGPT(nn.Module):
         nb_blocks,
         nb_lines=None,
         caterpillar_height=None,
-        dim_rec_v=-1,
         causal=False,
         dropout=0.0,
         len_max=1e5,
@@ -771,7 +781,12 @@ class MyGPT(nn.Module):
     ):
         super().__init__()
 
-        assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+        assert attention_layer in {
+            "mha",
+            "dumbrec",
+            "kvrec",
+            "caterpillar",
+        }, f"Unknown attention operator {attention_layer}."
 
         if attention_layer == "caterpillar":
             assert nb_lines % caterpillar_height == 0
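
With the "caterpillar" operator, nb_lines must be a multiple of caterpillar_height; the assertion above now also names an unknown operator instead of raising a bare AssertionError. An illustrative check (the specific numbers are assumptions):

    # 128 lines split into columns of height 16: the divisibility check passes.
    nb_lines, caterpillar_height = 128, 16
    assert nb_lines % caterpillar_height == 0
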
@@ -804,7 +819,7 @@ class MyGPT(nn.Module):
                 return DumbRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
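
With the dim_rec_v argument removed from the constructor, the recurrent value dimension is now derived from the model width, following the usual multi-head convention of dim_model split evenly across heads. A small arithmetic sketch (the example sizes are assumptions):

    # E.g. a 512-wide model with 8 heads gives 64-dimensional values per head.
    dim_model, nb_heads = 512, 8
    dim_v = dim_model // nb_heads
    assert dim_v == 64
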
@@ -813,7 +828,7 @@ class MyGPT(nn.Module):
                 return KVRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
@@ -822,7 +837,7 @@ class MyGPT(nn.Module):
                 return Caterpillar(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     caterpillar_length=self.caterpillar_length,
                     caterpillar_height=self.caterpillar_height,