Update: plumb a logger and **kwargs through the attention layers, make gate_dropout and default_bg configurable in Caterpillar, replace the head/tail gate dropout with element-wise Bernoulli dropout, and comment out the rotating-barrel gating.
diff --git a/mygpt.py b/mygpt.py
index a62cf49..3a48cdb 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -190,6 +190,8 @@ class DumbRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -319,6 +321,8 @@ class KVRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -471,6 +475,8 @@ class Caterpillar(nn.Module):
         caterpillar_height,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -485,14 +491,29 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.proba_gate_dropout = 0.0
+        ######################################################################
+        # sup_args
+
+        x = kwargs.get("gate_dropout")
+        if x is None:
+            self.proba_gate_dropout = 0.0
+        else:
+            self.proba_gate_dropout = float(x)
+
+        logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
+
+        x = kwargs.get("default_bg")
+        if x is None:
+            default_bg = -math.log(caterpillar_height - 1)
+        else:
+            default_bg = float(x)
+
+        logger(f"default_bg {default_bg}")
+
+        ######################################################################
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
-        self.b_G = nn.Parameter(
-            torch.full(
-                (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
-            )
-        )
+        self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
 
         self.w_K = randw(nb_heads, dim_qk, dim_model)
         self.w_V = randw(nb_heads, dim_v, dim_model)
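
The default bias is chosen so that, at initialization, each gate opens with probability about 1/caterpillar_height: sigmoid(-log(H - 1)) = 1 / (1 + (H - 1)) = 1/H. A minimal check, with H = 4 as an arbitrary illustrative value:

    import math
    import torch

    H = 4                               # stand-in for caterpillar_height
    b = -math.log(H - 1)                # the default default_bg
    g = torch.sigmoid(torch.tensor(b))
    print(g.item(), 1.0 / H)            # both are 0.25

Exposing default_bg through kwargs lets an experiment start the gates more open (default_bg = 0 gives sigmoid 0.5) or more closed without touching the module code.
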
@@ -565,15 +586,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        ######################################################################
-        # Roll the gating indexes
-
-        warnings.warn("rotating barrel", RuntimeWarning)
+        # warnings.warn("softmax gating", RuntimeWarning)
 
-        r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
-        t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
-        r_barrel = (r_barrel + (t_barrel + t0) // L) % R
-        G = G.gather(dim=2, index=r_barrel.expand_as(G))
+        # G = (
+        # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+        # ).softmax(dim=2)
 
         ######################################################################
         # The "flashbacks"
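
For reference, a shape sketch of the sigmoid gating above, with small arbitrary sizes; the commented-out variant would instead normalize with a softmax over the R caterpillar rows:

    import torch

    N, T, C, H, R = 2, 5, 8, 3, 4       # batch, time, dim_model, heads, rows (illustrative)
    X = torch.randn(N, T, C)
    w_G = torch.randn(H, R, C)          # per-head, per-row gating projection
    b_G = torch.zeros(H, R)             # gating bias

    # Sigmoid gating, as in the code: G is (N, H, R, T), each entry in (0, 1)
    G = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).sigmoid()
    print(G.shape)                      # torch.Size([2, 3, 4, 5])

    # The commented-out alternative, normalized over the rows
    G_soft = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).softmax(dim=2)
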
@@ -584,37 +601,32 @@ class Caterpillar(nn.Module):
             # G is NxHxExT where e is the caterpillar's row.
 
             warnings.warn("gate dropout", RuntimeWarning)
-            epsilon = 0.5
 
-            dropout_head = (
-                (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
-                .expand_as(G)
-                .float()
-            )
+            kill = (
+                torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
+            ).float()
 
-            dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+            alpha = G / (1 - self.proba_gate_dropout)
 
-            dropout_active = (
-                torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
-            ).long()
+            G = alpha * (1 - kill)
 
-            dropout_head *= dropout_active
-            dropout_tail *= dropout_active
+        ######################################################################
+        # Clip the gating to avoid values greater than 1 when several
+        # heads hit the same row
 
-            G = (
-                G
-                + dropout_head * (1 - epsilon - G.detach())
-                - dropout_tail * G.detach()
-            )
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
 
         ######################################################################
+        # Roll the gating indexes
 
-        # We prepare the arguments for the parallel scan
+        # warnings.warn("rotating barrel", RuntimeWarning)
 
-        # Clip the gating to avoid values greater than 1 when several
-        # heads hit the same row
+        # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+        # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+        # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+        # G = G.gather(dim=2, index=r_barrel.expand_as(G))
 
-        G = G / G.sum(1, keepdim=True).clamp(min=1)
+        # We prepare the arguments for the parallel scan
 
         A = 1 - G.sum(1)
 
@@ -709,6 +721,8 @@ class QKVAttention(nn.Module):
         nb_heads=1,
         causal=False,
         attention_dropout=0.0,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -800,6 +814,8 @@ class MyGPT(nn.Module):
         dropout=0.0,
         len_max=1e5,
         attention_layer="kvrec",
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -836,6 +852,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     causal=causal,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "dumbrec":
                 return DumbRec(
@@ -845,6 +863,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "kvrec":
                 return KVRec(
@@ -854,6 +874,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "caterpillar":
                 return Caterpillar(
@@ -864,6 +886,8 @@ class MyGPT(nn.Module):
                     caterpillar_length=self.caterpillar_length,
                     caterpillar_height=self.caterpillar_height,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             else:
                 raise ValueError(f"Unknown attention type {attention_layer}.")
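
With the logger and **kwargs forwarding in place, extra options reach the attention layers without having to add explicit parameters at every level; Caterpillar picks up gate_dropout and default_bg through kwargs.get(...), and layers that do not use them simply ignore them. A usage sketch (the dimension argument names follow how they are used in the module body, and the sizes are arbitrary):

    from mygpt import Caterpillar

    attention = Caterpillar(
        dim_model=64,
        dim_qk=16,
        dim_v=16,
        nb_heads=4,
        caterpillar_length=8,
        caterpillar_height=4,
        attention_dropout=0.0,
        logger=print,               # reports the resolved settings
        gate_dropout=0.25,          # read via kwargs.get("gate_dropout")
        default_bg=-1.0,            # read via kwargs.get("default_bg")
    )

At the MyGPT level the same keyword arguments simply ride along in **kwargs, e.g. MyGPT(..., attention_layer="caterpillar", gate_dropout=0.25, default_bg=-1.0).
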