diff --git a/mygpt.py b/mygpt.py
index 0400b48..d0fda7e 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -46,7 +46,7 @@ class BracketedSequence:
         return self.x[:, self.first : self.first + self.nb]
 
     def complete(self):
-        return self.first == 0 and self.nb == x.size(1)
+        return self.first == 0 and self.nb == self.x.size(1)
 
 
 ######################################################################
 
@@ -201,6 +201,26 @@ class QKVAttention(nn.Module):
 
 ##############################
 
+class NoiseInjector(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.noise_std = 0.0
+
+    def forward(self, x):
+        if self.noise_std > 0:
+            x = x + torch.randn(x.size(), device=x.device) * self.noise_std
+        return x
+
+
+def set_noise_injection(model, noise_std):
+    for m in model.modules():
+        if isinstance(m, NoiseInjector):
+            m.noise_std = noise_std
+
+
+##############################
+
+
 class MyGPT(nn.Module):
     def __init__(
         self,
@@ -228,7 +248,10 @@ class MyGPT(nn.Module):
         for b in range(nb_blocks):
             trunk_blocks += [
                 WithResidual(
-                    CacheWrapper(nn.LayerNorm((dim_model,))),
+                    CacheWrapper(
+                        nn.LayerNorm((dim_model,)),
+                        NoiseInjector(),
+                    ),
                     QKVAttention(
                         dim_in=dim_model,
                         dim_qk=dim_keys,
@@ -241,6 +264,7 @@ class MyGPT(nn.Module):
                 WithResidual(
                     CacheWrapper(
                         nn.LayerNorm((dim_model,)),
+                        NoiseInjector(),
                         nn.Linear(in_features=dim_model, out_features=dim_hidden),
                         nn.ReLU(),
                         nn.Linear(in_features=dim_hidden, out_features=dim_model),
@@ -264,36 +288,13 @@ class MyGPT(nn.Module):
                 m.weight.fill_(1.0)
 
     def forward(self, bs):
+        # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
         bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
         bs = self.embedding(bs)
         bs = self.trunk(bs)
         bs = self.readout(bs)
         return bs
 
-    # ar_mask is a tensor with 0s and 1s, of same shape as input, with
-    # 1s where tokens should be generated. The others are kept
-    # unchanged.
-
-    def masked_inplace_autoregression(
-        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
-    ):
-        to_generate = (ar_mask.sum(0) > 0).nonzero()
-        if to_generate.min() > 0:
-            self(
-                BracketedSequence(input, 0, to_generate.min())
-            )  # Needed to initialize the model's cache
-        for s in range(to_generate.min(), to_generate.max() + 1):
-            output = self(BracketedSequence(input, s, 1)).x
-            logits = output[:, s]
-            if forbidden_tokens is not None:
-                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
-            if deterministic_synthesis:
-                t_next = logits.argmax(1)
-            else:
-                dist = torch.distributions.categorical.Categorical(logits=logits)
-                t_next = dist.sample()
-            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
-
     def record_attention(self, v=True):
         for m in self.modules():
             if isinstance(m, QKVAttention):
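
Note on using the new noise-injection hooks: `NoiseInjector` starts with `noise_std = 0.0`, so it is the identity by default, and `set_noise_injection()` walks `model.modules()` to update every injector in place. The sketch below shows one way these hooks might be driven from a training loop; `model` is assumed to be a `MyGPT` instance built as in this file, and the linear decay schedule is a hypothetical choice, not something this commit prescribes.

    # Minimal sketch, assuming `model` is a MyGPT instance; the decay
    # schedule below is an illustrative assumption, not part of this commit.
    nb_epochs = 25

    for e in range(nb_epochs):
        # Anneal the injected noise from 0.1 down to 0 over training.
        # set_noise_injection() mutates every NoiseInjector in the model,
        # so a single call per epoch is enough.
        set_noise_injection(model, 0.1 * (1 - e / nb_epochs))
        # ... forward pass, loss, backward pass, optimizer step ...

    # Disable the noise entirely for evaluation or generation.
    set_noise_injection(model, 0.0)

Because each injector sits inside a `CacheWrapper` right after the `nn.LayerNorm`, the Gaussian perturbation (`torch.randn(x.size(), device=x.device) * noise_std`) is applied to the normalized activations feeding both the attention and the MLP residual branches, and it costs nothing once `noise_std` is back to zero.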