X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=mygpt.py;h=d6879dc08a29f05cac1998bc1ab16e46db07821c;hb=52c6bd98650c846459f10e8303dd2e6c7ba2a68f;hp=37fe6aff89b6b2b07acb1d1d8e5be632a025cc01;hpb=a4145c0493bf53f1d076f98d1ecc36cebf36478c;p=mygpt.git

diff --git a/mygpt.py b/mygpt.py
index 37fe6af..d6879dc 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -36,16 +36,15 @@ class PositionalEncoding(nn.Module):
         t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
         j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
         k = j%2
-        return x + torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi/2 * k)[None, :, :]
+        pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi/2 * k)
+        return x + pe
 
 ##############################
 
 class QKVAttention(nn.Module):
-    def __init__(
-            self,
-            dim_in, dim_qk, dim_v,
-            nb_heads = 1, causal = False, attention_dropout = 0.0
-    ):
+    def __init__(self,
+                 dim_in, dim_qk, dim_v,
+                 nb_heads = 1, causal = False, attention_dropout = 0.0):
         super().__init__()
 
         def randw(*d):
@@ -57,7 +56,7 @@ class QKVAttention(nn.Module):
         self.w_q = randw(nb_heads, dim_qk, dim_in)
         self.w_k = randw(nb_heads, dim_qk, dim_in)
         self.w_v = randw(nb_heads, dim_v, dim_in)
-        self.w_o = randw(dim_in, dim_v * nb_heads)
+        self.w_o = randw(dim_v * nb_heads, dim_in)
 
     def forward(self, x_q, x_kv = None):
         if x_kv is None: x_kv = x_q
@@ -87,7 +86,8 @@ class MyGPT(nn.Module):
     def __init__(self,
                  vocabulary_size,
                  dim_model, dim_keys, dim_hidden,
-                 nb_heads, nb_blocks, dropout = 0.):
+                 nb_heads, nb_blocks,
+                 dropout = 0.0, len_max = 1e5):
 
         super().__init__()
 
@@ -96,7 +96,7 @@ class MyGPT(nn.Module):
         self.embedding = nn.Sequential(
             nn.Embedding(vocabulary_size, dim_model),
             nn.Dropout(dropout),
-            PositionalEncoding(len_max = 1e5),
+            PositionalEncoding(len_max),
         )
 
         trunk_blocks = [ ]
@@ -104,17 +104,17 @@ class MyGPT(nn.Module):
         for _ in range(nb_blocks):
             trunk_blocks += [
                 Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
                     QKVAttention(
                         dim_in = dim_model,
-                        dim_qk = dim_keys, dim_v = dim_model // nb_heads,
+                        dim_qk = dim_keys,
+                        dim_v = dim_model // nb_heads,
                         nb_heads = nb_heads,
                         causal = True, attention_dropout = dropout
                     ),
-                    nn.Linear(in_features = dim_model, out_features = dim_model),
                 ),
                 Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
                     nn.Linear(in_features = dim_model, out_features = dim_hidden),
                     nn.ReLU(),
                     nn.Linear(in_features = dim_hidden, out_features = dim_model),
@@ -127,9 +127,11 @@ class MyGPT(nn.Module):
         self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)
 
     def forward(self, x):
+        x = F.pad(x, (1, 0))
         x = self.embedding(x)
         x = self.trunk(x)
         x = self.readout(x)
+        x = F.pad(x, (0, 0, 0, -1))
         return x
 
 ######################################################################
@@ -142,7 +144,7 @@ if __name__ == '__main__':
 
     model = MyGPT(
         vocabulary_size = vocabulary_size,
-        dim_model = 16, dim_keys = 50, dim_hidden = 100,
+        dim_model = 18, dim_keys = 50, dim_hidden = 100,
         nb_heads = 2, nb_blocks = 3,
         dropout = 0.1
    )
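
A note on the PositionalEncoding hunk: the refactor names the sinusoidal
table pe and drops the redundant [None, :, :] (the (T, D) table already
broadcasts against the (N, T, D) input). The k = j%2 phase trick encodes
the classic interleaved sin/cos of Vaswani et al. with a single sin, since
sin(theta + pi/2) = cos(theta). The later hunks also promote the hard-coded
len_max = 1e5 to a MyGPT constructor argument with that default. A minimal
standalone check of the phase trick, with an illustrative table size D = 8:

    # For odd dimensions (k = 1), sin(theta + pi/2) equals cos(theta), so
    # one sin call yields the usual interleaved sin/cos encoding.
    # T and D are illustrative; len_max matches the diff's default.
    import math
    import torch

    len_max, T, D = 1e5, 10, 8
    t = torch.arange(T, dtype = torch.float)[:, None]
    j = torch.arange(D, dtype = torch.float)[None, :]
    k = j % 2
    pe = torch.sin(t / (len_max ** ((j - k) / D)) + math.pi / 2 * k)

    # Column 0 is sin(t) and column 1 is cos(t) at the same frequency.
    assert torch.allclose(pe[:, 0], torch.sin(t[:, 0]))
    assert torch.allclose(pe[:, 1], torch.cos(t[:, 0]))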
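
The w_o hunk transposes the output projection from (dim_in, dim_v * nb_heads)
to (dim_v * nb_heads, dim_in), and the trunk hunk drops the nn.Linear that
used to follow QKVAttention in each attention Residual: with
dim_v = dim_model // nb_heads, the concatenated heads already land back at
dim_model, so a separate projection is redundant. The sketch below shows the
shape bookkeeping this layout implies; the head concatenation and the
right-multiplication y @ w_o are assumptions about the forward code, which
this diff does not show.

    # Hedged sketch: output projection with w_o of shape
    # (dim_v * nb_heads, dim_in). All sizes are illustrative.
    import torch

    N, T, nb_heads, dim_v, dim_in = 2, 5, 4, 8, 32
    y = torch.randn(N, nb_heads, T, dim_v)       # per-head attention outputs
    w_o = torch.randn(dim_v * nb_heads, dim_in)  # layout after this change

    y = y.transpose(1, 2).reshape(N, T, nb_heads * dim_v)  # concatenate heads
    y = y @ w_o                                  # project back to dim_in
    assert y.shape == (N, T, dim_in)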
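
The two F.pad calls added to MyGPT.forward implement the standard
autoregressive shift: padding the token tensor with (1, 0) prepends index 0
as a start symbol, so with causal attention the readout at position t only
sees tokens strictly before t, and the negative pad (0, 0, 0, -1) crops the
final time step of the logits so the output length matches the input length,
with logits[:, t] scoring x[:, t]. A shape-only illustration (batch size,
sequence length, and vocabulary size are made up):

    # F.pad with a negative amount crops instead of padding.
    import torch
    import torch.nn.functional as F

    x = torch.tensor([[3, 7, 5, 2]])       # (N=1, T=4) token indices
    x_in = F.pad(x, (1, 0))                # tensor([[0, 3, 7, 5, 2]]), (1, 5)

    logits = torch.randn(1, 5, 10)         # stand-in for readout(trunk(embedding(x_in)))
    logits = F.pad(logits, (0, 0, 0, -1))  # drop the last step -> (1, 4, 10)
    assert logits.shape == (1, 4, 10)      # logits[:, t] predicts x[:, t]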