X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=3a48cdbb793160ea9c88875d4f353b6a89555477;hb=3dd98b99909b2bca323673263874e2abb39ac10c;hp=7c9991f7d56fc3069a261b0642e4381c55bd02d9;hpb=73acbc986f9c386c001117581c4fc72d2f36803a;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index 7c9991f..3a48cdb 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -491,16 +491,29 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.proba_gate_dropout = 0.0
+        ######################################################################
+        # sup_args
+
+        x = kwargs.get("gate_dropout")
+        if x is None:
+            self.proba_gate_dropout = 0.0
+        else:
+            self.proba_gate_dropout = float(x)
 
-        default_b_G = kwargs.get("default_b_G")
-        if default_b_G is None:
-            default_b_G = -math.log(caterpillar_height - 1)
+        logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
+
+        x = kwargs.get("default_bg")
+        if x is None:
+            default_bg = -math.log(caterpillar_height - 1)
+        else:
+            default_bg = float(x)
 
-        logger(f"default_b_G {default_b_G}")
+        logger(f"default_bg {default_bg}")
+
+        ######################################################################
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
-        self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_b_G))
+        self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
 
         self.w_K = randw(nb_heads, dim_qk, dim_model)
         self.w_V = randw(nb_heads, dim_v, dim_model)
@@ -573,20 +586,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # Clip the gating to avoid values greater than 1 when several
-        # heads hit the same row
+        # warnings.warn("softmax gating", RuntimeWarning)
 
-        G = G / G.sum(1, keepdim=True).clamp(min=1)
-
-        ######################################################################
-        # Roll the gating indexes
-
-        # warnings.warn("rotating barrel", RuntimeWarning)
-
-        # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
-        # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
-        # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
-        # G = G.gather(dim=2, index=r_barrel.expand_as(G))
+        # G = (
+        #     torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+        # ).softmax(dim=2)
 
         ######################################################################
         # The "flashbacks"
@@ -597,30 +601,30 @@
 
         # G is NxHxExT where e is the caterpillar's row.
 
warnings.warn("gate dropout", RuntimeWarning) - epsilon = 0.5 - dropout_head = ( - (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0) - .expand_as(G) - .float() - ) + kill = ( + torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout + ).float() - dropout_tail = dropout_head.cumsum(dim=3) - dropout_head + alpha = G / (1 - self.proba_gate_dropout) - dropout_active = ( - torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout - ).long() + G = alpha * (1 - kill) - dropout_head *= dropout_active - dropout_tail *= dropout_active + ###################################################################### + # Clip the gating to avoid values greater than 1 when several + # heads hit the same row - G = ( - G - + dropout_head * (1 - epsilon - G.detach()) - - dropout_tail * G.detach() - ) + G = G / G.sum(1, keepdim=True).clamp(min=1) ###################################################################### + # Roll the gating indexes + + # warnings.warn("rotating barrel", RuntimeWarning) + + # r_barrel = torch.arange(R, device=G.device)[None, None, :, None] + # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :] + # r_barrel = (r_barrel + (t_barrel + t0) // L) % R + # G = G.gather(dim=2, index=r_barrel.expand_as(G)) # We prepare the arguments for the parallel scan