# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# This is an implementation from scratch of a "GPT", that is a model
# composed of several causal self-attention blocks. It is equipped
# with a caching mechanism for keys and values to avoid an O(N^3) cost
# for auto-regression (without the cache, each of the N generated
# tokens would require a full O(N^2) attention pass).

import math

import torch

from torch import nn
from torch.nn import functional as F

######################################################################

# A BracketedSequence is a BxTx... tensor with a first and a nb time
# steps to compute.
#
# Modules able to process it expect that they will have to process a
# first bracket starting at t=0, followed by a succession of brackets
# that move forward in time, do not overlap, and cover the axis T with
# no holes.
#
# Although it is more general, for a classical prompt-conditioned
# auto-regressive process it will be a first bracket starting at 0 and
# of arbitrary length for the "prompt", followed by brackets of length
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
# reset when the input bracket starts at t=0.

class BracketedSequence:
    def __init__(self, x, first=None, nb=None):
        self.x = x
        self.first = 0 if first is None else first
        self.nb = x.size(1) if nb is None else nb

    # The sub-tensor covered by the current bracket
    def slice(self):
        return self.x[:, self.first : self.first + self.nb]


######################################################################
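
# An illustrative sketch of the bracket protocol (this helper is for
# exposition only and is not used elsewhere; `module` stands for any
# bracket-aware module defined below): a first bracket covering the
# prompt resets the caches, then length-1 brackets advance one step
# at a time.


def bracket_walkthrough(module, x, prompt_len):
    module(BracketedSequence(x, 0, prompt_len))  # first == 0 resets the cache
    for s in range(prompt_len, x.size(1)):
        module(BracketedSequence(x, s, 1))  # one new time step per call


######################################################################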

class CacheWrapper(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        if bs.first == 0:
            # First bracket: run f once to get the output shape and
            # allocate a cache covering the full time axis
            y = self.f(bs.slice())
            self.cache_y = y.new(*((y.size(0), bs.x.size(1)) + y.size()[2:]))
            self.cache_y[:, bs.first : bs.first + bs.nb] = y
        else:
            # Later brackets: fill in the already-allocated cache
            self.cache_y[:, bs.first : bs.first + bs.nb] = self.f(bs.slice())

        return BracketedSequence(self.cache_y, bs.first, bs.nb)


##############################
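
# A minimal usage sketch (for exposition only): CacheWrapper makes a
# per-position module such as nn.Linear bracket-aware, and computing
# bracket by bracket matches a single full-coverage pass.


def cache_wrapper_demo():
    m = CacheWrapper(nn.Linear(4, 4))
    x = torch.randn(2, 5, 4)
    y_full = m(BracketedSequence(x)).x  # one bracket covering all of T
    m(BracketedSequence(x, 0, 1))  # first == 0 reallocates the cache
    for s in range(1, x.size(1)):
        m(BracketedSequence(x, s, 1))
    assert torch.allclose(y_full, m.cache_y)


##############################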

class WithResidual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        return BracketedSequence(bs.x + self.f(bs).x, bs.first, bs.nb)


##############################

class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
    #
    # Both cases are computed with a single sin, using the identity
    # sin(x + pi/2) = cos(x) for the odd dimensions.

    def forward(self, bs):
        if bs.first == 0:
            t = torch.arange(bs.x.size(1), dtype=bs.x.dtype, device=bs.x.device)[
                :, None
            ]
            j = torch.arange(bs.x.size(2), dtype=bs.x.dtype, device=bs.x.device)[
                None, :
            ]
            k = j % 2
            self.pe = torch.sin(
                t / (self.len_max ** ((j - k) / bs.x.size(2))) + math.pi / 2 * k
            )
            self.cache_y = bs.x.new(bs.x.size())

        self.cache_y[:, bs.first : bs.first + bs.nb] = (
            bs.slice() + self.pe[bs.first : bs.first + bs.nb]
        )

        return BracketedSequence(self.cache_y, bs.first, bs.nb)


##############################
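
# A small sanity sketch (for exposition only): the single-sin
# formulation above is exactly the classical interleaved sin/cos
# encoding.


def positional_encoding_check(T=8, D=6, L=1e5):
    t = torch.arange(T, dtype=torch.float64)[:, None]
    j = torch.arange(D, dtype=torch.float64)[None, :]
    k = j % 2
    pe = torch.sin(t / (L ** ((j - k) / D)) + math.pi / 2 * k)
    # Even dimensions are sines, odd dimensions cosines of the same argument
    assert torch.allclose(pe[:, 0::2], torch.sin(t / (L ** (j[:, 0::2] / D))))
    assert torch.allclose(pe[:, 1::2], torch.cos(t / (L ** ((j[:, 1::2] - 1) / D))))


##############################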

class QKVAttention(nn.Module):
    def __init__(
        self, dim_in, dim_qk, dim_v, nb_heads=1, causal=False, attention_dropout=0.0
    ):
        super().__init__()

        def randw(*d):
            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))

        assert causal, "TODO: Switch off the cache when non-causal!!!"
        self.causal = causal
        self.attention_dropout = attention_dropout

        self.w_q = randw(nb_heads, dim_qk, dim_in)
        self.w_k = randw(nb_heads, dim_qk, dim_in)
        self.w_v = randw(nb_heads, dim_v, dim_in)
        self.w_o = randw(dim_v * nb_heads, dim_in)

    def forward(self, bs_q):
        x_q = bs_q.x

        if bs_q.first == 0:
            self.cache_k = x_q.new_zeros(
                x_q.size(0), self.w_k.size(0), x_q.size(1), self.w_k.size(1)
            )
            self.cache_v = x_q.new_zeros(
                x_q.size(0), self.w_v.size(0), x_q.size(1), self.w_v.size(1)
            )
            self.cache_y = x_q.new_zeros(x_q.size(0), x_q.size(1), self.w_o.size(1))

        # Queries are computed for the current bracket only; keys and
        # values go into the cache so that past brackets can be attended to
        q = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_q
        )
        self.cache_k[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_k
        )
        self.cache_v[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_v
        )

        a = torch.einsum(
            "nhtd,nhsd->nhts", q, self.cache_k[:, :, : bs_q.first + bs_q.nb]
        ) / math.sqrt(self.w_q.size(1))

        if self.causal:
            if bs_q.first == 0:
                self.cache_attzero = (
                    torch.arange(x_q.size(1), device=q.device)[None, None, :, None]
                    < torch.arange(x_q.size(1), device=q.device)[None, None, None, :]
                )
            a = a.masked_fill(
                self.cache_attzero[
                    :, :, bs_q.first : bs_q.first + bs_q.nb, : bs_q.first + bs_q.nb
                ],
                float("-inf"),
            )

        a = a.softmax(dim=3)
        a = F.dropout(a, self.attention_dropout, self.training)

        y = torch.einsum(
            "nhts,nhsd->nthd", a, self.cache_v[:, :, : bs_q.first + bs_q.nb]
        ).flatten(2)

        self.cache_y[:, bs_q.first : bs_q.first + bs_q.nb] = y @ self.w_o

        return BracketedSequence(self.cache_y, bs_q.first, bs_q.nb)


##############################
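
# A shape walkthrough (for exposition only, with arbitrary sizes):
# with N=2 sequences of T=4 tokens and H=3 heads, q is (N, H, T, dim_qk),
# the attention matrix a is (N, H, T, S) over the S cached positions,
# and the output comes back to (N, T, dim_in).


def qkv_shapes_demo():
    att = QKVAttention(dim_in=12, dim_qk=5, dim_v=4, nb_heads=3, causal=True)
    y = att(BracketedSequence(torch.randn(2, 4, 12)))
    assert y.x.size() == (2, 4, 12)


##############################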

class MyGPT(nn.Module):
    def __init__(
        self,
        vocabulary_size,
        dim_model,
        dim_keys,
        dim_hidden,
        nb_heads,
        nb_blocks,
        causal=False,
        dropout=0.0,
        len_max=1e5,
    ):
        super().__init__()

        assert dim_model % nb_heads == 0

        self.embedding = nn.Sequential(
            CacheWrapper(nn.Embedding(vocabulary_size, dim_model), nn.Dropout(dropout)),
            AddPositionalEncoding(len_max),
        )

        trunk_blocks = []

        for b in range(nb_blocks):
            trunk_blocks += [
                WithResidual(
                    CacheWrapper(nn.LayerNorm((dim_model,))),
                    QKVAttention(
                        dim_in=dim_model,
                        dim_qk=dim_keys,
                        dim_v=dim_model // nb_heads,
                        nb_heads=nb_heads,
                        causal=causal,
                        attention_dropout=dropout,
                    ),
                ),
                WithResidual(
                    CacheWrapper(
                        nn.LayerNorm((dim_model,)),
                        nn.Linear(in_features=dim_model, out_features=dim_hidden),
                        nn.ReLU(),
                        nn.Linear(in_features=dim_hidden, out_features=dim_model),
                        nn.Dropout(dropout),
                    )
                ),
            ]

        self.trunk = nn.Sequential(*trunk_blocks)

        self.readout = CacheWrapper(
            nn.Linear(in_features=dim_model, out_features=vocabulary_size)
        )

        with torch.no_grad():
            for m in self.modules():
                if isinstance(m, nn.Embedding):
                    m.weight.normal_(mean=0, std=2e-2)
                elif isinstance(m, nn.LayerNorm):
                    m.bias.zero_()
                    m.weight.fill_(1.0)

    def forward(self, bs):
        # Shift the input right by one position, so that the output at
        # position t is the prediction of the token at position t
        bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
        bs = self.embedding(bs)
        bs = self.trunk(bs)
        bs = self.readout(bs)
        return bs

    # ar_mask is a tensor with 0s and 1s, of same shape as input, with
    # 1s where tokens should be generated. The others are kept
    # unchanged.

    def masked_inplace_autoregression(
        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
    ):
        to_generate = (ar_mask.sum(0) > 0).nonzero()
        if to_generate.min() > 0:
            self(
                BracketedSequence(input, 0, to_generate.min())
            )  # Needed to initialize the model's cache
        for s in range(to_generate.min(), to_generate.max() + 1):
            output = self(BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if forbidden_tokens is not None:
                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
            if deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


######################################################################

if __name__ == "__main__":
    print("Basic check.")

    vocabulary_size = 3
    x = torch.randint(vocabulary_size, (1, 5))

    model = MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=4,  # tiny dimensions, just to check the mechanics
        dim_keys=2,
        dim_hidden=2,
        nb_heads=2,
        nb_blocks=2,
        dropout=0.1,
        causal=True,
    )

    model.eval()

    y1 = model(BracketedSequence(x)).x

    # Recompute the output one time step at a time and check that the
    # caching gives the same result as the full-sequence pass
    y2 = torch.randn_like(y1)
    for s in range(x.size(1)):
        z = model(BracketedSequence(x, s, 1))
        y2[:, s] = z.slice()

    print(f"error={((y1 - y2).norm() / (y1.norm() + y2.norm())).item()}")
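
    # A generation sketch (for exposition only): keep the first two
    # tokens of x as a prompt and regenerate the rest in place
    ar_mask = torch.ones_like(x)
    ar_mask[:, :2] = 0
    model.masked_inplace_autoregression(x, ar_mask)
    print(f"generated={x.tolist()}")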

######################################################################