X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=mygpt.py;h=a17849181edafb878c6c55cc45ad0422b01c1dce;hb=8a548630c88957264306db4354e880414b0fa8ef;hp=ac1c55e84d91fecb06b453533f1800aead640ed7;hpb=00b2d5ed01fb523fbc4e699f0419329efbee0ea8;p=culture.git

diff --git a/mygpt.py b/mygpt.py
index ac1c55e..a178491 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -46,7 +46,7 @@ class BracketedSequence:
         return self.x[:, self.first : self.first + self.nb]
 
     def complete(self):
-        return self.first == 0 and self.nb == x.size(1)
+        return self.first == 0 and self.nb == self.x.size(1)
 
 
 ######################################################################
@@ -169,9 +169,6 @@ class QKVAttention(nn.Module):
             "nhtd,nhsd->nhts", q, self.cache_k[:, :, : bs_q.first + bs_q.nb]
         ) / math.sqrt(self.w_q.size(1))
 
-        if self.record_attention:
-            self.a = a
-
         if self.causal:
             if bs_q.first == 0:
                 self.cache_attzero = (
@@ -186,6 +183,10 @@ class QKVAttention(nn.Module):
             )
 
         a = a.softmax(dim=3)
+
+        if self.record_attention:
+            self.a = a
+
         a = F.dropout(a, self.attention_dropout, self.training)
 
         y = torch.einsum(
@@ -263,6 +264,7 @@ class MyGPT(nn.Module):
                     m.weight.fill_(1.0)
 
     def forward(self, bs):
+        # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
         bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
         bs = self.embedding(bs)
         bs = self.trunk(bs)
@@ -274,8 +276,15 @@ class MyGPT(nn.Module):
     # unchanged.
 
     def masked_inplace_autoregression(
-        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+        self,
+        input,
+        ar_mask,
+        temperature=1.0,
+        deterministic_synthesis=False,
+        forbidden_tokens=None,
+        forced_biases=None,
     ):
+        sum_logits = 0
         to_generate = (ar_mask.sum(0) > 0).nonzero()
         if to_generate.min() > 0:
             self(
@@ -283,16 +292,23 @@ class MyGPT(nn.Module):
             )  # Needed to initialize the model's cache
         for s in range(to_generate.min(), to_generate.max() + 1):
             output = self(BracketedSequence(input, s, 1)).x
-            logits = output[:, s]
+            logits = output[:, s] / temperature
             if forbidden_tokens is not None:
                 logits = logits.masked_fill(forbidden_tokens, float("-inf"))
+            if forced_biases is not None:
+                logits = logits + forced_biases[None, :]
             if deterministic_synthesis:
                 t_next = logits.argmax(1)
             else:
                 dist = torch.distributions.categorical.Categorical(logits=logits)
                 t_next = dist.sample()
+                sum_logits += logits.log_softmax(dim=-1)[
+                    torch.arange(t_next.size(0)), t_next
+                ].sum()
             input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
 
+        return sum_logits
+
     def record_attention(self, v=True):
         for m in self.modules():
             if isinstance(m, QKVAttention):
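
Note, not part of the patch: the last two hunks change the signature of MyGPT.masked_inplace_autoregression, adding a sampling temperature and an optional additive bias on the logits, and make it return the summed log-probabilities of the sampled tokens. A minimal usage sketch, assuming `model` is an already constructed MyGPT instance; the batch size, sequence length, and prompt/generation split below are illustrative placeholders, not values from the repository:

    import torch

    # Illustrative call to the updated masked_inplace_autoregression.
    # `model` is assumed to be a constructed MyGPT instance; the shapes
    # and token values here are placeholders.
    batch_size, seq_len = 4, 20
    input = torch.zeros(batch_size, seq_len, dtype=torch.long)  # token ids, prompt in [:, :10]
    ar_mask = torch.zeros_like(input)
    ar_mask[:, 10:] = 1  # 1s mark the positions to be generated in place

    with torch.no_grad():
        sum_logits = model.masked_inplace_autoregression(
            input,
            ar_mask,
            temperature=1.0,                # new: divides the logits before sampling
            deterministic_synthesis=False,  # sample rather than take the argmax
            forbidden_tokens=None,
            forced_biases=None,             # new: optional bias added per vocabulary entry
        )

    # When sampling, sum_logits is the sum over the batch and generated
    # positions of the log-probabilities of the chosen tokens, which the
    # caller can use, for instance, to score the generated completions.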