X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=131c822c76620076721cdb3a7722544dd6ea70b2;hb=HEAD;hp=45b7b59bc42f5f4c2ab31a92e8ad732e87226086;hpb=363ce48d64d1a036b86d29564bf6ad367126c2b1;p=picoclvr.git

diff --git a/mygpt.py b/mygpt.py
index 45b7b59..131c822 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -46,7 +46,7 @@ class BracketedSequence:
         return self.x[:, self.first : self.first + self.nb]
 
     def complete(self):
-        return self.first == 0 and self.nb == x.size(1)
+        return self.first == 0 and self.nb == self.x.size(1)
 
 
 ######################################################################
@@ -116,7 +116,13 @@ class AddPositionalEncoding(nn.Module):
 
 class QKVAttention(nn.Module):
     def __init__(
-        self, dim_in, dim_qk, dim_v, nb_heads=1, causal=False, attention_dropout=0.0
+        self,
+        dim_in,
+        dim_qk,
+        dim_v,
+        nb_heads=1,
+        causal=False,
+        attention_dropout=0.0,
     ):
         super().__init__()
 
@@ -125,6 +131,7 @@ class QKVAttention(nn.Module):
 
         self.causal = causal
         self.attention_dropout = attention_dropout
+        self.record_attention = False
 
         self.w_q = randw(nb_heads, dim_qk, dim_in)
         self.w_k = randw(nb_heads, dim_qk, dim_in)
@@ -176,6 +183,10 @@ class QKVAttention(nn.Module):
             )
 
         a = a.softmax(dim=3)
+
+        if self.record_attention:
+            self.a = a
+
         a = F.dropout(a, self.attention_dropout, self.training)
 
         y = torch.einsum(
@@ -253,6 +264,7 @@ class MyGPT(nn.Module):
                     m.weight.fill_(1.0)
 
     def forward(self, bs):
+        # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
         bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
         bs = self.embedding(bs)
         bs = self.trunk(bs)
@@ -264,7 +276,12 @@ class MyGPT(nn.Module):
     # unchanged.
 
     def masked_inplace_autoregression(
-        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+        self,
+        input,
+        ar_mask,
+        deterministic_synthesis=False,
+        forbidden_tokens=None,
+        forced_biases=None,
     ):
         to_generate = (ar_mask.sum(0) > 0).nonzero()
         if to_generate.min() > 0:
@@ -276,6 +293,8 @@ class MyGPT(nn.Module):
             logits = output[:, s]
             if forbidden_tokens is not None:
                 logits = logits.masked_fill(forbidden_tokens, float("-inf"))
+            if forced_biases is not None:
+                logits = logits + forced_biases[None, :]
             if deterministic_synthesis:
                 t_next = logits.argmax(1)
             else:
@@ -283,6 +302,18 @@ class MyGPT(nn.Module):
                 t_next = dist.sample()
             input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
 
+    def record_attention(self, v=True):
+        for m in self.modules():
+            if isinstance(m, QKVAttention):
+                m.record_attention = v
+
+    def retrieve_attention(self):
+        a = []
+        for m in self.modules():
+            if isinstance(m, QKVAttention):
+                a.append(m.a)
+        return a
+
 
 ######################################################################
 
@@ -298,13 +329,12 @@ if __name__ == "__main__":
         dim_keys=2,
         dim_hidden=2,
         nb_heads=2,
-        nb_blocks=1,
+        nb_blocks=2,
        dropout=0.1,
         causal=True,
     )
 
     model.eval()
-
     y1 = model(BracketedSequence(x)).x
     y2 = torch.randn_like(y1)
     for s in range(x.size(1)):
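
For context, the hunks above add an attention-recording hook (the QKVAttention.record_attention flag plus MyGPT.record_attention / MyGPT.retrieve_attention) and a forced_biases logit offset to masked_inplace_autoregression. Below is a minimal usage sketch, not part of the diff itself; the toy model dimensions, the dummy input x, the ar_mask layout, and the bias values are illustrative assumptions only.

import torch

from mygpt import BracketedSequence, MyGPT

vocabulary_size = 10
x = torch.randint(vocabulary_size, (2, 8))  # (batch, sequence) of token indices

model = MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=4,
    dim_keys=2,
    dim_hidden=2,
    nb_heads=2,
    nb_blocks=2,
    dropout=0.0,
    causal=True,
)
model.eval()

# Attention recording: run a full forward pass with recording enabled, then
# collect one attention tensor per QKVAttention module (in module order).
model.record_attention(True)
y = model(BracketedSequence(x)).x
attention_maps = model.retrieve_attention()
model.record_attention(False)

# forced_biases: a per-token logit offset added at every generated position.
# Here it nudges sampling toward token 0 on the masked positions.
ar_mask = torch.zeros_like(x)
ar_mask[:, 4:] = 1  # regenerate the second half of each sequence in place
forced_biases = torch.zeros(vocabulary_size)
forced_biases[0] = 2.0
model.masked_inplace_autoregression(x, ar_mask, forced_biases=forced_biases)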