# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
import math

import torch

from torch import nn
from torch.nn import functional as F
##############################
class Residual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, x):
        return x + self.f(x)
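
# Illustrative usage sketch (not part of the original file): Residual wraps a
# sub-module f and computes x + f(x), so the wrapped path must preserve the
# feature dimension, e.g.
#
#   block = Residual(nn.LayerNorm(8), nn.Linear(8, 8))
#   assert block(torch.randn(2, 5, 8)).shape == (2, 5, 8)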
##############################
class PositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    # From Vaswani et al. 2017
    # PE_{t,2i}   = sin(t/(L^{2i/D}))
    # PE_{t,2i+1} = cos(t/(L^{2i/D}))
    def forward(self, x):
        t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
        k = j % 2  # 0 on even channels, 1 on odd ones
        # sin(u + pi/2) = cos(u), so the pi/2 * k phase shift gives the cos rows
        return x + torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi / 2 * k)[None, :, :]
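
# Illustrative sketch (not in the original file): the encoding is purely
# additive and shape-preserving, e.g.
#
#   pe = PositionalEncoding(len_max = 1e5)
#   x = torch.zeros(1, 100, 16)
#   y = pe(x)               # y[0, t, :] is the raw positional code of step t
#   assert y.shape == x.shape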
##############################
class QKVAttention(nn.Module):
    def __init__(self, dim_in, dim_qk, dim_v, nb_heads = 1, causal = False, attention_dropout = 0.0):
        super().__init__()

        def randw(*d):
            # Gaussian init scaled by the fan-in (the last dimension)
            return nn.Parameter(torch.empty(*d).normal_(0, 1 / math.sqrt(d[-1])))

        self.wq = randw(nb_heads, dim_qk, dim_in)
        self.wk = randw(nb_heads, dim_qk, dim_in)
        self.wv = randw(nb_heads, dim_v, dim_in)
        self.causal = causal
        self.attention_dropout = attention_dropout

    def forward(self, x):
        # x is (N, T, dim_in); q, k, v are (N, nb_heads, T, dim_qk or dim_v)
        q = torch.einsum('ntc,hdc->nhtd', x, self.wq)
        k = torch.einsum('ntc,hdc->nhtd', x, self.wk)
        v = torch.einsum('ntc,hdc->nhtd', x, self.wv)
        r = math.sqrt(q.size(3))
        a = torch.einsum('nhtd,nhsd->nhts', q, k).div(r)
        if self.causal:
            # Mask out the upper triangle so position t attends only to s <= t
            mask = torch.tril(q.new_ones(a.size(2), a.size(3)))[None, None, :, :] == 0
            a = a.masked_fill(mask, float('-inf'))
        a = a.softmax(dim = 3)
        a = F.dropout(a, self.attention_dropout, self.training)
        y = torch.einsum('nhts,nhsd->nhtd', a, v)
        return y.permute(0, 2, 1, 3).flatten(2) # nhtd -> nt(hd)
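
# Illustrative shape check (not in the original file): the output concatenates
# the per-head values, so its last dimension is nb_heads * dim_v, e.g.
#
#   att = QKVAttention(dim_in = 16, dim_qk = 50, dim_v = 8, nb_heads = 2, causal = True)
#   assert att(torch.randn(25, 100, 16)).shape == (25, 100, 2 * 8)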
##############################
class MyGPT(nn.Module):
    def __init__(self,
                 vocabulary_size,
                 dim_model, dim_keys, dim_hidden,
                 nb_heads, nb_blocks, dropout = 0.):
        super().__init__()

        assert dim_model % nb_heads == 0

        self.embedding = nn.Sequential(
            nn.Embedding(vocabulary_size, dim_model),
            nn.Dropout(dropout),
            PositionalEncoding(len_max = 1e5),
        )

        trunk_blocks = [ ]

        for _ in range(nb_blocks):
            trunk_blocks += [
                # Pre-norm causal self-attention block
                Residual(
                    nn.LayerNorm(dim_model),
                    QKVAttention(
                        dim_in = dim_model,
                        dim_qk = dim_keys, dim_v = dim_model // nb_heads,
                        nb_heads = nb_heads,
                        causal = True, attention_dropout = dropout
                    ),
                    nn.Linear(in_features = dim_model, out_features = dim_model),
                ),
                # Pre-norm position-wise feed-forward block
                Residual(
                    nn.LayerNorm(dim_model),
                    nn.Linear(in_features = dim_model, out_features = dim_hidden),
                    nn.ReLU(),
                    nn.Linear(in_features = dim_hidden, out_features = dim_model),
                    nn.Dropout(dropout),
                ),
            ]

        self.trunk = nn.Sequential(*trunk_blocks)

        self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)

    def forward(self, x):
        x = self.embedding(x)
        x = self.trunk(x)
        x = self.readout(x)
        return x
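
# Illustrative next-token training sketch (not part of the original file):
#
#   logits = model(x)                                # (N, T, vocabulary_size)
#   loss = F.cross_entropy(
#       logits[:, :-1].reshape(-1, logits.size(-1)), # prediction from prefix <= t
#       x[:, 1:].reshape(-1),                        # target is the token at t+1
#   )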
######################################################################
if __name__ == '__main__':
    vocabulary_size = 10  # arbitrary small value for this smoke test

    x = torch.randint(vocabulary_size, (25, 100))

    model = MyGPT(
        vocabulary_size = vocabulary_size,
        dim_model = 16, dim_keys = 50, dim_hidden = 100,
        nb_heads = 2, nb_blocks = 3,
        dropout = 0.1
    )

    y = model(x)

    print(f'x {tuple(x.size())} -> y {tuple(y.size())}')
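
    # Minimal causality sanity check (illustrative, not in the original file):
    # with causal attention, logits at step t cannot depend on tokens at steps
    # > t, so corrupting the second half of x must leave the first half of the
    # output unchanged.
    model.eval()  # disable the dropouts so the two passes are comparable
    x_corrupt = x.clone()
    x_corrupt[:, 50:] = torch.randint(vocabulary_size, (25, 50))
    with torch.no_grad():
        y_ref, y_corrupt = model(x), model(x_corrupt)
    assert torch.allclose(y_ref[:, :50], y_corrupt[:, :50])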
######################################################################