# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# This is an implementation from scratch of a "GPT", that is a model
# composed of several causal self-attention blocks. It is equipped
# with a caching mechanism for keys and values to avoid an O(N^3) cost
# for auto-regression.

import math

import torch

from torch import nn
from torch.nn import functional as F

######################################################################

# A BracketedSequence is a BxTx... tensor with a first time step and a
# number nb of time steps to compute.
#
# Modules able to process it expect that they will have to process a
# first bracket starting at t=0, followed by a succession of brackets
# that move forward in time, do not overlap, and cover the axis T with
# no holes.
#
# Although it is more general, for a classical prompt-conditioned
# auto-regressive process it will be a first bracket starting at 0 and
# of arbitrary length for the "prompt", followed by brackets of length
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
# reset when the input bracket starts at t=0. A short usage sketch
# follows the class definition below.


class BracketedSequence:
    def __init__(self, x, first=None, nb=None):
        self.x = x
        self.first = 0 if first is None else first
        self.nb = x.size(1) if nb is None else nb

    def slice(self):
        # The sub-tensor corresponding to the current bracket
        return self.x[:, self.first : self.first + self.nb]
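
# The sketch below is not part of the original module: it illustrates the
# bracket protocol described above. The function name and arguments are
# illustrative; model is assumed to be any module that processes
# BracketedSequence objects (for instance the MyGPT model defined further
# down), x a BxTx... input, and nb_prompt >= 1 the length of the prompt.


def _bracket_protocol_demo(model, x, nb_prompt):
    # The first bracket starts at t=0 and resets the model's caches
    bs = model(BracketedSequence(x, 0, nb_prompt))
    # The following length-1 brackets only compute the new time steps
    for t in range(nb_prompt, x.size(1)):
        bs = model(BracketedSequence(x, t, 1))
    return bs.x
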

######################################################################


class CacheWrapper(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        if bs.first == 0:
            # A bracket starting at t=0 resets the cache
            y = self.f(bs.slice())
            self.cache_y = y.new(*((y.size(0), bs.x.size(1)) + y.size()[2:]))
            self.cache_y[:, bs.first : bs.first + bs.nb] = y
        else:
            self.cache_y[:, bs.first : bs.first + bs.nb] = self.f(bs.slice())

        bs.x = self.cache_y

        return bs

##############################


class WithResidual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        bs.x = bs.x + self.f(bs).x
        return bs

##############################


class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
    # (a short note on this formula follows the class below)

    def forward(self, bs):
        if bs.first == 0:
            t = torch.arange(bs.x.size(1), dtype=bs.x.dtype, device=bs.x.device)[
                :, None
            ]
            j = torch.arange(bs.x.size(2), dtype=bs.x.dtype, device=bs.x.device)[
                None, :
            ]
            k = j % 2
            self.pe = torch.sin(
                t / (self.len_max ** ((j - k) / bs.x.size(2))) + math.pi / 2 * k
            )
            self.cache_y = bs.x.new(bs.x.size())

        self.cache_y[:, bs.first : bs.first + bs.nb] = (
            bs.slice() + self.pe[bs.first : bs.first + bs.nb]
        )

        bs.x = self.cache_y

        return bs
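
# Note added for clarity, not in the original file: the single sin() above
# covers both halves of the formula because sin(x + pi/2) == cos(x). For an
# odd dimension j = 2i + 1, k == 1 and j - k == 2i, so the argument matches
# that of column 2i and the phase shift turns the sine into a cosine. A
# minimal standalone reconstruction with arbitrary sizes T, D, L:


def _positional_encoding_sketch(T=8, D=4, L=100.0):
    t = torch.arange(T, dtype=torch.float)[:, None]
    j = torch.arange(D, dtype=torch.float)[None, :]
    k = j % 2
    # Even columns are sin(t / L^(2i/D)), odd columns the matching cosines
    return torch.sin(t / (L ** ((j - k) / D)) + math.pi / 2 * k)
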

##############################


class QKVAttention(nn.Module):
    def __init__(
        self, dim_in, dim_qk, dim_v, nb_heads=1, causal=False, attention_dropout=0.0
    ):
        super().__init__()

        def randw(*d):
            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))

        self.causal = causal
        self.attention_dropout = attention_dropout

        self.w_q = randw(nb_heads, dim_qk, dim_in)
        self.w_k = randw(nb_heads, dim_qk, dim_in)
        self.w_v = randw(nb_heads, dim_v, dim_in)
        self.w_o = randw(dim_v * nb_heads, dim_in)

    def forward(self, bs_q):
        x_q = bs_q.x

        if bs_q.first == 0:
            # New sequence: allocate the key/value/output caches over the
            # full time axis
            self.cache_k = x_q.new_zeros(
                x_q.size(0), self.w_k.size(0), x_q.size(1), self.w_k.size(1)
            )
            self.cache_v = x_q.new_zeros(
                x_q.size(0), self.w_v.size(0), x_q.size(1), self.w_v.size(1)
            )
            self.cache_y = x_q.new_zeros(x_q.size(0), x_q.size(1), self.w_o.size(1))

        q = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_q
        )
        self.cache_k[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_k
        )
        self.cache_v[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_v
        )

        # Attention of the current bracket's queries over all cached keys
        a = torch.einsum(
            "nhtd,nhsd->nhts", q, self.cache_k[:, :, : bs_q.first + bs_q.nb]
        ) / math.sqrt(self.w_q.size(1))

        if self.causal:
            if bs_q.first == 0:
                self.cache_attzero = (
                    torch.arange(x_q.size(1), device=q.device)[None, None, :, None]
                    < torch.arange(x_q.size(1), device=q.device)[None, None, None, :]
                )
            a = a.masked_fill(
                self.cache_attzero[
                    :, :, bs_q.first : bs_q.first + bs_q.nb, : bs_q.first + bs_q.nb
                ],
                float("-inf"),
            )

        a = a.softmax(dim=3)
        a = F.dropout(a, self.attention_dropout, self.training)

        y = torch.einsum(
            "nhts,nhsd->nthd", a, self.cache_v[:, :, : bs_q.first + bs_q.nb]
        ).flatten(2)

        self.cache_y[:, bs_q.first : bs_q.first + bs_q.nb] = y @ self.w_o

        bs_q.x = self.cache_y

        return bs_q
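
# Hedged illustration, not part of the original module: driving a standalone
# QKVAttention with brackets reuses cache_k and cache_v, so the bracket at
# step t attends over the t+1 cached positions instead of recomputing every
# key and value. The sizes N, T, dim below are arbitrary; with causal=True
# and no dropout the returned relative error should be ~0.


def _qkv_cache_check(N=2, T=5, dim=16):
    attn = QKVAttention(dim_in=dim, dim_qk=8, dim_v=8, nb_heads=2, causal=True)
    attn.eval()
    x = torch.randn(N, T, dim)
    y_full = attn(BracketedSequence(x)).x.clone()
    for t in range(T):
        y_step = attn(BracketedSequence(x, t, 1)).x
    return ((y_full - y_step).norm() / y_full.norm()).item()
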

##############################


class MyGPT(nn.Module):
    def __init__(
        self,
        vocabulary_size,
        dim_model,
        dim_keys,
        dim_hidden,
        nb_heads,
        nb_blocks,
        causal=False,
        dropout=0.0,
        len_max=1e5,
    ):
        super().__init__()

        assert dim_model % nb_heads == 0

        self.embedding = nn.Sequential(
            CacheWrapper(nn.Embedding(vocabulary_size, dim_model), nn.Dropout(dropout)),
            AddPositionalEncoding(len_max),
        )

        trunk_blocks = []

        for b in range(nb_blocks):
            trunk_blocks += [
                # Pre-layer-norm attention sub-block with a residual connection
                WithResidual(
                    CacheWrapper(nn.LayerNorm((dim_model,))),
                    QKVAttention(
                        dim_in=dim_model,
                        dim_qk=dim_keys,
                        dim_v=dim_model // nb_heads,
                        nb_heads=nb_heads,
                        causal=causal,
                        attention_dropout=dropout,
                    ),
                ),
                # Pre-layer-norm feed-forward sub-block, also residual
                WithResidual(
                    CacheWrapper(
                        nn.LayerNorm((dim_model,)),
                        nn.Linear(in_features=dim_model, out_features=dim_hidden),
                        nn.ReLU(),
                        nn.Linear(in_features=dim_hidden, out_features=dim_model),
                        nn.Dropout(dropout),
                    ),
                ),
            ]

        self.trunk = nn.Sequential(*trunk_blocks)

        self.readout = CacheWrapper(
            nn.Linear(in_features=dim_model, out_features=vocabulary_size)
        )

        with torch.no_grad():
            for m in self.modules():
                if isinstance(m, nn.Embedding):
                    m.weight.normal_(mean=0, std=2e-2)
                elif isinstance(m, nn.LayerNorm):
                    m.bias.zero_()
                    m.weight.fill_(1.0)

    def forward(self, bs):
        # Shift the input one token to the right, so the output at position
        # t is a function of the tokens at positions < t only
        bs.x = F.pad(bs.x, (1, -1))
        bs = self.embedding(bs)
        bs = self.trunk(bs)
        bs = self.readout(bs)
        return bs
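
# A hedged sketch, not in the original file, of how the key/value caches are
# meant to be used for auto-regression: a first bracket over the prompt warms
# the caches, then each new token costs a single length-1 bracket instead of a
# full pass over the whole sequence. The function name, the greedy argmax and
# the in-place writes into x are illustrative choices, not the author's API.


def _greedy_autoregression_sketch(model, x, nb_prompt):
    # x is a BxT tensor of token indices whose first nb_prompt >= 1 columns
    # hold the prompt; the remaining columns are overwritten with predictions
    model.eval()
    model(BracketedSequence(x, 0, nb_prompt))
    for s in range(nb_prompt, x.size(1)):
        output = model(BracketedSequence(x, s, 1)).x
        # Thanks to the input shift in MyGPT.forward, the logits at position
        # s depend only on the tokens at positions < s
        x[:, s] = output[:, s].argmax(dim=1)
    return x
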

######################################################################

if __name__ == "__main__":
    print("Basic check.")

    # Small arbitrary sizes for the check; causal=True is required for the
    # incremental, cache-based pass to match the single full pass
    vocabulary_size = 10

    x = torch.randint(vocabulary_size, (9, 7))

    model = MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=16,
        dim_keys=8,
        dim_hidden=32,
        nb_heads=2,
        nb_blocks=2,
        causal=True,
        dropout=0.1,
    )

    model.eval()

    # Full pass over the whole sequence in a single bracket
    y1 = model(BracketedSequence(x)).x

    # Same computation, one time step at a time, using the caches
    y2 = torch.randn_like(y1)
    for s in range(x.size(1)):
        z = model(BracketedSequence(x, s, 1))
        y2[:, s] = z.x[:, s]

    # print(y1.max(dim = 2).values)
    # print(y2.max(dim = 2).values)

    print(f"error={((y1 - y2).norm() / (y1.norm() + y2.norm())).item()}")

######################################################################