# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math

import torch

from torch import nn
from torch.nn import functional as F

##############################
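# Wraps one or several modules and adds a residual connection: y = x + f(x)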
class WithResidual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, x):
        return x + self.f(x)

##############################
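# Adds a sinusoidal positional encoding to an input of size N x T x D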
class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
    def forward(self, x):
        t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
        k = j % 2
        pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi/2 * k)
        return x + pe

##############################
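# Multi-head scaled dot-product attention, with one projection matrix per
# head for the queries, keys and values, and an optional causal mask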
class QKVAttention(nn.Module):
    def __init__(self,
                 dim_in, dim_qk, dim_v,
                 nb_heads = 1, causal = False, attention_dropout = 0.0):
        super().__init__()

        def randw(*d):
            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))

        self.causal = causal
        self.attention_dropout = attention_dropout

        self.w_q = randw(nb_heads, dim_qk, dim_in)
        self.w_k = randw(nb_heads, dim_qk, dim_in)
        self.w_v = randw(nb_heads, dim_v, dim_in)
        self.w_o = randw(dim_v * nb_heads, dim_in)

    def forward(self, x_q, x_kv = None):
        if x_kv is None: x_kv = x_q

        q = torch.einsum('ntc,hdc->nhtd', x_q, self.w_q)
        k = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_k)
        v = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_v)

        a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))

        if self.causal:
            # Forbid attention to the future: mask entries whose key index
            # is strictly greater than the query index
            mask = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
                   < torch.arange(a.size(3), device = q.device)[None, None, None, :]
            a = a.masked_fill(mask, float('-inf'))

        a = a.softmax(dim = 3)
        a = F.dropout(a, self.attention_dropout, self.training)

        y = torch.einsum('nhts,nhsd->nthd', a, v).flatten(2)
        y = y @ self.w_o

        return y

##############################
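# A minimal GPT: token embedding with positional encoding, a stack of
# pre-norm residual blocks (causal self-attention followed by a one hidden
# layer MLP), and a linear read-out to vocabulary logits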
class MyGPT(nn.Module):
    def __init__(self,
                 vocabulary_size,
                 dim_model, dim_keys, dim_hidden,
                 nb_heads, nb_blocks,
                 dropout = 0.0, len_max = 1e5):
        super().__init__()

        assert dim_model % nb_heads == 0

        self.embedding = nn.Sequential(
            nn.Embedding(vocabulary_size, dim_model),
            nn.Dropout(dropout),
            AddPositionalEncoding(len_max),
        )

        trunk_blocks = [ ]

        for _ in range(nb_blocks):
            trunk_blocks += [
                WithResidual(
                    nn.LayerNorm((dim_model,)),
                    QKVAttention(
                        dim_in = dim_model,
                        dim_qk = dim_keys,
                        dim_v = dim_model // nb_heads,
                        nb_heads = nb_heads,
                        causal = True, attention_dropout = dropout
                    ),
                ),
                WithResidual(
                    nn.LayerNorm((dim_model,)),
                    nn.Linear(in_features = dim_model, out_features = dim_hidden),
                    nn.ReLU(),
                    nn.Linear(in_features = dim_hidden, out_features = dim_model),
                    nn.Dropout(dropout),
                ),
            ]

        self.trunk = nn.Sequential(*trunk_blocks)

        self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)

        with torch.no_grad():
            for m in self.modules():
                if isinstance(m, nn.Embedding):
                    m.weight.normal_(mean = 0, std = 2e-2)
                elif isinstance(m, nn.LayerNorm):
                    m.bias.zero_()
                    m.weight.fill_(1.0)

    def forward(self, x):
        # Shift right so that the output at position t predicts the token at position t
        x = F.pad(x, (1, -1))
        x = self.embedding(x)
        x = self.trunk(x)
        x = self.readout(x)
        return x

######################################################################
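# Sanity check: build a small model and run a forward pass on random tokens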
if __name__ == '__main__':
    print('Basic check.')

    # A small arbitrary vocabulary and a batch of random token sequences
    vocabulary_size = 10

    x = torch.randint(vocabulary_size, (25, 100))

    model = MyGPT(
        vocabulary_size = vocabulary_size,
        dim_model = 18, dim_keys = 50, dim_hidden = 100,
        nb_heads = 2, nb_blocks = 3,
        dropout = 0.1
    )

    y = model(x)

######################################################################