diff --git a/mygpt.py b/mygpt.py
index 3bb3519..7119c7a 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -271,44 +271,6 @@ class MyGPT(nn.Module):
         bs = self.readout(bs)
         return bs
 
-    # ar_mask is a tensor with 0s and 1s, of same shape as input, with
-    # 1s where tokens should be generated. The others are kept
-    # unchanged.
-
-    def masked_inplace_autoregression(
-        self,
-        input,
-        ar_mask,
-        temperature=1.0,
-        deterministic_synthesis=False,
-        forbidden_tokens=None,
-        forced_biases=None,
-    ):
-        sum_logits = 0
-        to_generate = (ar_mask.sum(0) > 0).nonzero()
-        if to_generate.min() > 0:
-            self(
-                BracketedSequence(input, 0, to_generate.min())
-            )  # Needed to initialize the model's cache
-        for s in range(to_generate.min(), to_generate.max() + 1):
-            output = self(BracketedSequence(input, s, 1)).x
-            logits = output[:, s]
-            if forbidden_tokens is not None:
-                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
-            if forced_biases is not None:
-                logits = logits + forced_biases[None, :]
-            if deterministic_synthesis:
-                t_next = logits.argmax(1)
-            else:
-                dist = torch.distributions.categorical.Categorical(logits=logits)
-                t_next = dist.sample()
-            sum_logits += logits.log_softmax(dim=-1)[
-                torch.arange(t_next.size(0)), t_next
-            ]
-            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
-
-        return sum_logits
-
     def record_attention(self, v=True):
         for m in self.modules():
             if isinstance(m, QKVAttention):
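
Note: the method removed above generated tokens in place at every position where ar_mask is 1, leaving the other positions unchanged, and returned the summed log-probabilities of the sampled tokens. A minimal sketch of how it was called before this commit (the tensor shapes are illustrative, and model is assumed to be a trained MyGPT instance):

import torch

# Hypothetical setup: a batch of 2 sequences of length 8, where the
# first 4 tokens of each sequence are a fixed prompt and the last 4
# are to be generated.
input = torch.zeros(2, 8, dtype=torch.int64)   # prompt tokens + padding
ar_mask = torch.zeros(2, 8, dtype=torch.int64)
ar_mask[:, 4:] = 1                             # 1s mark positions to generate

# Fills input[:, 4:] in place by sampling from the model's next-token
# distribution (argmax if deterministic_synthesis=True), and returns
# the summed log-probabilities of the tokens it produced.
sum_logits = model.masked_inplace_autoregression(
    input,
    ar_mask,
    deterministic_synthesis=False,
)

Because the update rule was ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s], prompt positions (mask 0) always kept their original tokens, so fixed and generated spans could be interleaved arbitrarily within a sequence.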