Update: add a running-statistics Calibrator helper, make the Caterpillar gate dropout and gate-bias initialisation configurable via gate_dropout / default_bg keyword arguments, and forward logger and **kwargs from MyGPT to the attention layers.
[mygptrnn.git] / mygpt.py
index 633ad64..aded796 100755 (executable)
--- a/mygpt.py
+++ b/mygpt.py
@@ -190,6 +190,8 @@ class DumbRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -319,6 +321,8 @@ class KVRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -460,6 +464,36 @@ def moving_window(x, dim, win_dim, win_size):
 ##############################
 
 
+class Calibrator:
+    def __init__(self, w=None, b=None):
+        self.w = w
+        self.b = b
+        self.s, self.s_sq, self.n = 0, 0, 0
+        self.mean, self.std = 0, 0
+
+    def update(self, X):
+        X = X.detach()
+        self.s += X.sum(dim=0)
+        self.s_sq += X.pow(2).sum(dim=0)
+        self.n += X.size(0)
+
+    def moments(self):
+        mean = self.s / self.n
+        std = (self.s_sq / self.n - mean * mean).sqrt()
+        return mean, std
+
+    def normalize(self):
+        mean, std = self.moments()
+        if self.b is not None:
+            self.b.sub_(mean)
+        if self.w is not None:
+            self.w.div_(std)
+        result = mean - self.mean, std - self.std
+        self.mean, self.std = mean, std
+        self.s, self.s_sq, self.n = 0, 0, 0
+        return result
+
+
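Note on the new helper: Calibrator accumulates running first and second moments of whatever is passed to update(), column by column, and normalize() folds the resulting mean and std into an optional weight/bias pair in place (b is shifted by -mean, w is divided by std) before resetting the accumulators. A minimal usage sketch follows; the linear layer and tensor sizes are made up for illustration, and the three calibrators created later in this commit are constructed without w/b, so they only gather statistics.

import torch
import torch.nn as nn

lin = nn.Linear(8, 8)
calib = Calibrator(w=lin.weight.data, b=lin.bias.data)   # Calibrator as defined just above

for _ in range(100):
    X = lin(torch.randn(32, 8))    # toy activations to calibrate
    calib.update(X)                # accumulates per-column sum and sum of squares

mean, std = calib.moments()        # running statistics since the last reset
d_mean, d_std = calib.normalize()  # shifts b, rescales w in place, resets; returns the drift since the previous call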
 class Caterpillar(nn.Module):
     def __init__(
         self,
@@ -471,6 +505,8 @@ class Caterpillar(nn.Module):
         caterpillar_height,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -485,14 +521,29 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.proba_gate_dropout = 0.0
+        ######################################################################
+        # sup_args
+
+        x = kwargs.get("gate_dropout")
+        if x is None:
+            self.proba_gate_dropout = 0.0
+        else:
+            self.proba_gate_dropout = float(x)
+
+        logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
+
+        x = kwargs.get("default_bg")
+        if x is None:
+            default_bg = -math.log(caterpillar_height - 1)
+        else:
+            default_bg = float(x)
+
+        logger(f"default_bg {default_bg}")
+
+        ######################################################################
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
-        self.b_G = nn.Parameter(
-            torch.full(
-                (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
-            )
-        )
+        self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
 
         self.w_K = randw(nb_heads, dim_qk, dim_model)
         self.w_V = randw(nb_heads, dim_v, dim_model)
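The two settings read from **kwargs above behave as follows: gate_dropout sets proba_gate_dropout (0.0 when absent) and is used by the gate-dropout branch further down, while default_bg overrides the initial gate bias b_G; the fallback -log(caterpillar_height - 1) makes each gate open with probability sigmoid(-log(R - 1)) = 1 / (1 + (R - 1)) = 1/R at initialisation. A quick illustrative check (the value of R below is made up):

import math
import torch

R = 4                              # stands in for caterpillar_height
b = -math.log(R - 1)               # the default_bg fallback
print(torch.tensor(b).sigmoid())   # tensor(0.2500) == 1/R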
@@ -510,6 +561,10 @@ class Caterpillar(nn.Module):
             dim_v,
         )
 
+        self.calibrator_G = Calibrator()
+        self.calibrator_rec_V = Calibrator()
+        self.calibrator_rec_K = Calibrator()
+
     def reset_inner_loss(self):
         self.acc_attention = 0
         self.acc_nb = 0
@@ -544,8 +599,8 @@ class Caterpillar(nn.Module):
             self.rec_K = X.new_zeros(N, R, T, DK)
             # We start the recurrent sequences with optimizable
             # initial values. No idea if it helps.
-            self.rec_V[:, :, t0 - L : t0] = self.init_V_rec[None, :, :, :]
-            self.rec_K[:, :, t0 - L : t0] = self.init_K_rec[None, :, :, :]
+            self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+            self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
 
             self.cache_Y = X.new_zeros(N, T, DM)
 
@@ -565,24 +620,13 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        ######################################################################
-        # Roll the gating indexes
-
-        warnings.warn("rotating barrel", RuntimeWarning)
-
-        # print(f"SANITY2 {N=} {H=} {R=} {t0=} {t1=} {G.size()=}")
+        self.calibrator_G.update(G.reshape(-1, G.size(-1)))
 
-        n_barrel = torch.arange(N, device=G.device)[:, None, None, None]
-        h_barrel = torch.arange(H, device=G.device)[None, :, None, None]
-        r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
-        t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
-        r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+        # warnings.warn("softmax gating", RuntimeWarning)
 
-        # GG = G.gather(dim=2,index=r_barrel)
-        G = G[n_barrel, h_barrel, r_barrel, t_barrel]
-
-        # print("SANITY", (GG-G).abs())
-        # exit(0)
+        # G = (
+        # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+        # ).softmax(dim=2)
 
         ######################################################################
         # The "flashbacks"
@@ -593,37 +637,32 @@ class Caterpillar(nn.Module):
             # G is NxHxExT where e is the caterpillar's row.
 
             warnings.warn("gate dropout", RuntimeWarning)
-            epsilon = 0.5
 
-            dropout_head = (
-                (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
-                .expand_as(G)
-                .float()
-            )
+            kill = (
+                torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
+            ).float()
 
-            dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+            alpha = G / (1 - self.proba_gate_dropout)
 
-            dropout_active = (
-                torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
-            ).long()
+            G = alpha * (1 - kill)
 
-            dropout_head *= dropout_active
-            dropout_tail *= dropout_active
+        ######################################################################
+        # Clip the gating to avoid values greater than 1 when several
+        # heads hit the same row
 
-            G = (
-                G
-                + dropout_head * (1 - epsilon - G.detach())
-                - dropout_tail * G.detach()
-            )
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
 
         ######################################################################
+        # Roll the gating indexes
 
-        # We prepare the arguments for the parallel scan
+        # warnings.warn("rotating barrel", RuntimeWarning)
 
-        # Clip the gating to avoid values greater than 1 when several
-        # heads hit the same row
+        # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+        # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+        # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+        # G = G.gather(dim=2, index=r_barrel.expand_as(G))
 
-        G = G / G.sum(1, keepdim=True).clamp(min=1)
+        # We prepare the arguments for the parallel scan
 
         A = 1 - G.sum(1)
 
@@ -656,8 +695,18 @@ class Caterpillar(nn.Module):
         next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
         next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
 
-        self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
-        self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+        next_V = next_V.flatten(2, 3)
+        next_K = next_K.flatten(2, 3)
+
+        self.calibrator_rec_V.update(
+            next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
+        )
+        self.calibrator_rec_K.update(
+            next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
+        )
+
+        self.rec_V[:, :, t0:t1] = next_V
+        self.rec_K[:, :, t0:t1] = next_K
 
         ######################################################################
         # compute the readout
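The permute/reshape that feeds the two recurrent-state calibrators above turns the batch, caterpillar-row and feature dimensions into rows and keeps the chunk's time positions as columns, so Calibrator.update accumulates one running mean/std per position in the chunk. A toy shape check (sizes made up for illustration):

import torch

N, R, T, DV = 2, 3, 5, 7                   # toy batch, rows, chunk length, channels
next_V = torch.randn(N, R, T, DV)          # same layout as the flattened next_V above
rows = next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
print(rows.shape)                          # torch.Size([42, 5]) == (N*R*DV, T)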
@@ -718,6 +767,8 @@ class QKVAttention(nn.Module):
         nb_heads=1,
         causal=False,
         attention_dropout=0.0,
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -809,6 +860,8 @@ class MyGPT(nn.Module):
         dropout=0.0,
         len_max=1e5,
         attention_layer="kvrec",
+        logger=print,
+        **kwargs,
     ):
         super().__init__()
 
@@ -845,6 +898,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     causal=causal,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "dumbrec":
                 return DumbRec(
@@ -854,6 +909,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "kvrec":
                 return KVRec(
@@ -863,6 +920,8 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             elif attention_layer == "caterpillar":
                 return Caterpillar(
@@ -873,6 +932,8 @@ class MyGPT(nn.Module):
                     caterpillar_length=self.caterpillar_length,
                     caterpillar_height=self.caterpillar_height,
                     attention_dropout=dropout,
+                    logger=logger,
+                    **kwargs,
                 )
             else:
                 raise ValueError(f"Unknown attention type {attention_layer}.")
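With logger and **kwargs now threaded from MyGPT down to each attention layer, layer-specific options such as gate_dropout and default_bg can be supplied to the top-level constructor. A sketch under the assumption that the remaining MyGPT arguments keep their existing names (elided as ... below); the option values are made up:

import mygpt

model = mygpt.MyGPT(
    ...,                           # vocabulary / dimension arguments, unchanged
    attention_layer="caterpillar",
    logger=print,
    gate_dropout=0.05,             # read in Caterpillar via kwargs.get("gate_dropout")
    default_bg=-2.0,               # read in Caterpillar via kwargs.get("default_bg")
)

Since every layer now absorbs **kwargs, unrecognised options pass through silently; as far as this diff shows, gate_dropout and default_bg are only acted on by Caterpillar.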