X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=b137cdbca1cd085ee8dec0185c514118e928b5d3;hb=9112db2ed7d8c262c4ef8298cf6637515675f967;hp=fb24b9aab1c706245d24a6474daa09d30f7a5fd2;hpb=8fdce4736a05a37d0f8706148dd743bce123fe1b;p=mygptrnn.git

diff --git a/mygpt.py b/mygpt.py
index fb24b9a..b137cdb 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -202,7 +202,7 @@ class DumbRec(nn.Module):
         attention_dropout=0.0,
         len_max=1e5,
         logger=print,
-        **kwargs,
+        args=None,
     ):
         super().__init__()
 
@@ -333,7 +333,7 @@ class KVRec(nn.Module):
         attention_dropout=0.0,
         len_max=1e5,
         logger=print,
-        **kwargs,
+        args=None,
     ):
         super().__init__()
 
@@ -487,7 +487,7 @@ class Caterpillar(nn.Module):
         attention_dropout=0.0,
         len_max=1e5,
         logger=print,
-        **kwargs,
+        args=None,
     ):
         super().__init__()
 
@@ -502,27 +502,13 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        ######################################################################
-        # sup_args
-
-        x = kwargs.get("gate_dropout")
-        if x is None:
-            self.proba_gate_dropout = 0.0
-        else:
-            self.proba_gate_dropout = float(x)
-
-        logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
-
-        x = kwargs.get("default_bg")
-        if x is None:
-            default_bg = -math.log(caterpillar_height - 1)
-        else:
-            default_bg = float(x)
-
-        logger(f"default_bg {default_bg}")
+        self.gate_dropout_proba = args.gate_dropout_proba
+        self.gate_dropout_sync = args.gate_dropout_sync
+        self.gate_dropout_replace = args.gate_dropout_replace
 
         ######################################################################
 
+        default_bg = -math.log(caterpillar_height - 1)
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
 
@@ -542,14 +528,14 @@ class Caterpillar(nn.Module):
             dim_v,
         )
 
-    def reset_inner_loss(self):
-        self.acc_attention = 0
-        self.acc_nb = 0
+    # def reset_inner_loss(self):
+    #     self.acc_attention = 0
+    #     self.acc_nb = 0
 
-    def get_inner_loss(self):
-        # warnings.warn("l2 regularization", RuntimeWarning)
-        # return (self.acc_attention / self.acc_nb).pow(2).sum()
-        return torch.tensor([0], device=self.w_Q.device)
+    # def get_inner_loss(self):
+    #     warnings.warn("l2 regularization", RuntimeWarning)
+    #     return (self.acc_attention / self.acc_nb).pow(2).sum()
+    #     return torch.tensor([0], device=self.w_Q.device)
 
     def forward(self, bs):
         # Dimensions to make the source a bit clearer, that's needed
@@ -627,11 +613,8 @@ class Caterpillar(nn.Module):
             gated_V = gated_V.unflatten(2, (-1, L))
             gated_K = gated_K.unflatten(2, (-1, L))
 
-            next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
-            next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
-
-            next_V = next_V.flatten(2, 3)
-            next_K = next_K.flatten(2, 3)
+            next_V = pscan_dim(A, gated_V, init_rec_V, dim=2).flatten(2, 3)
+            next_K = pscan_dim(A, gated_K, init_rec_K, dim=2).flatten(2, 3)
 
             return next_V, next_K
 
@@ -639,20 +622,26 @@ class Caterpillar(nn.Module):
 
         next_V, next_K = recurrence(G, V, K)
 
-        if self.training and self.proba_gate_dropout > 0.0:
+        if self.training and self.gate_dropout_proba > 0.0:
             # G is NxHxRxT where r is the caterpillar's row.
 
             warnings.warn("gate dropout", RuntimeWarning)
 
+            if self.gate_dropout_sync:
+                shape_kill = (N, 1, 1)
+            else:
+                shape_kill = (N, H, R)
+
             # Pick a point in each of the NxHxR timeline and set this
             # entry and the following to 1
             kill = (
-                torch.rand(N, H, R, t1 - t0, device=G.device).sort(dim=3).indices == 0
+                torch.rand(*shape_kill, t1 - t0, device=G.device).sort(dim=3).indices
+                == 0
             ).cumsum(dim=3)
 
             # Keep these mask for only some of the NxHxR
             kill = kill * (
-                torch.rand(N, H, R, 1, device=G.device) <= self.proba_gate_dropout
+                torch.rand(*shape_kill, 1, device=G.device) <= self.gate_dropout_proba
             )
 
             # The coefficient to keep are the complementary
@@ -660,11 +649,15 @@ class Caterpillar(nn.Module):
 
             masked_next_V, masked_next_K = recurrence(G * mask, V, K)
 
-            next_V = next_V.detach() + (masked_next_V - masked_next_V.detach()) / (
-                1 - self.proba_gate_dropout
+            if self.gate_dropout_replace:
+                next_V = next_V.detach()
+                next_K = next_K.detach()
+
+            next_V = next_V + (masked_next_V - masked_next_V.detach()) / (
+                1 - self.gate_dropout_proba
             )
-            next_K = next_K.detach() + (masked_next_K - masked_next_K.detach()) / (
-                1 - self.proba_gate_dropout
+            next_K = next_K + (masked_next_K - masked_next_K.detach()) / (
+                1 - self.gate_dropout_proba
             )
 
         self.rec_V[:, :, t0:t1] = next_V
@@ -730,7 +723,7 @@ class QKVAttention(nn.Module):
         causal=False,
         attention_dropout=0.0,
         logger=print,
-        **kwargs,
+        args=None,
     ):
         super().__init__()
 
@@ -823,7 +816,7 @@ class MyGPT(nn.Module):
         len_max=1e5,
         attention_layer="kvrec",
         logger=print,
-        **kwargs,
+        args=None,
     ):
         super().__init__()
 
@@ -861,7 +854,7 @@ class MyGPT(nn.Module):
                     causal=causal,
                     attention_dropout=dropout,
                     logger=logger,
-                    **kwargs,
+                    args=args,
                 )
             elif attention_layer == "dumbrec":
                 return DumbRec(
@@ -872,7 +865,7 @@ class MyGPT(nn.Module):
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
                     logger=logger,
-                    **kwargs,
+                    args=args,
                 )
             elif attention_layer == "kvrec":
                 return KVRec(
@@ -883,7 +876,7 @@ class MyGPT(nn.Module):
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
                     logger=logger,
-                    **kwargs,
+                    args=args,
                 )
             elif attention_layer == "caterpillar":
                 return Caterpillar(
@@ -895,7 +888,7 @@ class MyGPT(nn.Module):
                     caterpillar_height=self.caterpillar_height,
                     attention_dropout=dropout,
                     logger=logger,
-                    **kwargs,
+                    args=args,
                )
            else:
                raise ValueError(f"Unknown attention type {attention_layer}.")