From 3dd98b99909b2bca323673263874e2abb39ac10c Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Sun, 14 Jan 2024 14:00:31 +0100
Subject: [PATCH] Update.

---
 fridge   | 27 ++++++++++++++++++++++
 mygpt.py | 70 +++++++++++++++++++++++++++++---------------------------
 2 files changed, 63 insertions(+), 34 deletions(-)

diff --git a/fridge b/fridge
index 194c4e6..f87c1df 100644
--- a/fridge
+++ b/fridge
@@ -177,3 +177,30 @@ def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
 
     print(f"\n\nSANITY {a**T}\n")
     exit(0)
+
+######################################################################
+
+2024 Jan 14 13:39:37 (from mygpt.py)
+
+            epsilon = 0.5
+
+            dropout_head = (
+                (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
+                .expand_as(G)
+                .float()
+            )
+
+            dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+
+            dropout_active = (
+                torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
+            ).long()
+
+            dropout_head *= dropout_active
+            dropout_tail *= dropout_active
+
+            G = (
+                G
+                + dropout_head * (1 - epsilon - G.detach())
+                - dropout_tail * G.detach()
+            )
diff --git a/mygpt.py b/mygpt.py
index 099847c..3a48cdb 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -491,16 +491,27 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.proba_gate_dropout = 0.0
+        ######################################################################
+        # sup_args
+
+        x = kwargs.get("gate_dropout")
+        if x is None:
+            self.proba_gate_dropout = 0.0
+        else:
+            self.proba_gate_dropout = float(x)
 
-        default_bg = kwargs.get("default_bg")
-        if default_bg is None:
+        logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
+
+        x = kwargs.get("default_bg")
+        if x is None:
             default_bg = -math.log(caterpillar_height - 1)
         else:
-            default_bg = float(default_bg)
+            default_bg = float(x)
 
         logger(f"default_bg {default_bg}")
 
+        ######################################################################
+
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
 
@@ -575,20 +586,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # Clip the gating to avoid values greater than 1 when several
-        # heads hit the same row
+        # warnings.warn("softmax gating", RuntimeWarning)
 
-        G = G / G.sum(1, keepdim=True).clamp(min=1)
-
-        ######################################################################
-        # Roll the gating indexes
-
-        # warnings.warn("rotating barrel", RuntimeWarning)
-
-        # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
-        # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
-        # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
-        # G = G.gather(dim=2, index=r_barrel.expand_as(G))
+        # G = (
+        #     torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+        # ).softmax(dim=2)
 
         ######################################################################
         # The "flashbacks"
@@ -599,30 +601,30 @@ class Caterpillar(nn.Module):
             # G is NxHxExT where e is the caterpillar's row.
 
             warnings.warn("gate dropout", RuntimeWarning)
-            epsilon = 0.5
 
-            dropout_head = (
-                (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
-                .expand_as(G)
-                .float()
-            )
+            kill = (
+                torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
+            ).float()
 
-            dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+            alpha = G / (1 - self.proba_gate_dropout)
 
-            dropout_active = (
-                torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
-            ).long()
+            G = alpha * (1 - kill)
 
-            dropout_head *= dropout_active
-            dropout_tail *= dropout_active
+        ######################################################################
+        # Clip the gating to avoid values greater than 1 when several
+        # heads hit the same row
 
-            G = (
-                G
-                + dropout_head * (1 - epsilon - G.detach())
-                - dropout_tail * G.detach()
-            )
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
 
         ######################################################################
+        # Roll the gating indexes
+
+        # warnings.warn("rotating barrel", RuntimeWarning)
+
+        # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+        # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+        # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+        # G = G.gather(dim=2, index=r_barrel.expand_as(G))
 
         # We prepare the arguments for the parallel scan
 
-- 
2.20.1
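######################################################################

The gate dropout introduced above is standard "inverted" dropout
applied to the gating tensor G: each coefficient is killed with
probability proba_gate_dropout, and the survivors are pre-scaled by
1 / (1 - proba_gate_dropout), so the result has the same expectation
as G. A minimal self-contained sketch of that mechanism; the function
name gate_dropout and the shapes below are illustrative, not taken
from mygpt.py:

import torch

def gate_dropout(G, p, training=True):
    # Inverted dropout on gating coefficients: each entry survives
    # with probability 1 - p and is rescaled by 1 / (1 - p), so that
    # the expectation over the dropout mask equals G.
    if not training or p <= 0.0:
        return G
    kill = (torch.rand(G.size(), device=G.device) <= p).float()
    alpha = G / (1 - p)
    return alpha * (1 - kill)

# Illustrative shapes: N batch, H heads, R caterpillar rows, T time steps.
N, H, R, T = 2, 4, 8, 16
G = torch.rand(N, H, R, T)  # gating coefficients in (0, 1)

# Empirical check that the expectation is preserved.
samples = torch.stack([gate_dropout(G, p=0.25) for _ in range(10000)])
print((samples.mean(0) - G).abs().max())  # should be close to 0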