# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math

import torch

from torch import nn
from torch.nn import functional as F

######################################################################

# A BracketedSequence is a BxTx... tensor with a first and a nb time
# steps to compute.

# Modules able to process it expect that they will have to process a
# first bracket starting at t=0, followed by a succession of brackets
# that move forward in time, do not overlap, and cover the axis T with
# no holes.
#
# Although it is more general, for a classical prompt-conditioned
# auto-regressive process it will be a first bracket starting at 0 and
# of arbitrary length for the "prompt", followed by brackets of length
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
# reset when the input bracket starts at t=0.
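#
# For example, with sequences of length T=7 and a prompt of length 4,
# a caching forward pass would process the brackets (first=0, nb=4),
# (first=4, nb=1), (first=5, nb=1), (first=6, nb=1), which move
# forward in time and cover the axis T without overlap.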


class BracketedSequence:
    def __init__(self, x, first=None, nb=None):
        self.x = x
        self.first = 0 if first is None else first
        self.nb = x.size(1) if nb is None else nb

    def slice(self):
        return self.x[:, self.first : self.first + self.nb]


######################################################################


class WithResidual(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        bs.x = bs.x + self.f(bs).x
        return bs


######################################################################
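
# CacheWrapper applies f to the current bracket only and stores the
# result in a cache covering the whole sequence, so that downstream
# modules always see a full BxTx... tensor in bs.x.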


class CacheWrapper(nn.Module):
    def __init__(self, *f):
        super().__init__()
        self.f = f[0] if len(f) == 1 else nn.Sequential(*f)

    def forward(self, bs):
        if bs.first == 0:
            y = self.f(bs.slice())
            self.cache_y = y.new(*((y.size(0), bs.x.size(1)) + y.size()[2:]))
            self.cache_y[:, bs.first : bs.first + bs.nb] = y
        else:
            self.cache_y[:, bs.first : bs.first + bs.nb] = self.f(bs.slice())

        bs.x = self.cache_y

        return bs


##############################
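
# AddPositionalEncoding adds the sinusoidal positional encoding of
# [Vaswani et al 2017] to the current bracket, caching the result for
# the whole sequence. An optional order permutes the encoding before
# it is added.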


class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
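    #
    # The code below uses a single sin for both cases: k = j % 2 adds a
    # pi/2 phase on odd dimensions, which turns the sin into a cos, and
    # (j - k) makes each (sin, cos) pair share the same frequency.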

    def forward(self, bs, order=None):
        if bs.first == 0:
            t = torch.arange(bs.x.size(1), dtype=bs.x.dtype, device=bs.x.device)[
                :, None
            ]
            j = torch.arange(bs.x.size(2), dtype=bs.x.dtype, device=bs.x.device)[
                None, :
            ]
            k = j % 2
            self.pe = torch.sin(
                t / (self.len_max ** ((j - k) / bs.x.size(2))) + math.pi / 2 * k
            )

            if order is not None:
                self.pe = self.pe.gather(1, order.unsqueeze(-1).expand_as(self.pe))

            self.cache_y = bs.x.new(bs.x.size())

        self.cache_y[:, bs.first : bs.first + bs.nb] = (
            bs.slice() + self.pe[bs.first : bs.first + bs.nb]
        )

        bs.x = self.cache_y

        return bs


##############################
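
# QKVAttention is a multi-head scaled dot-product attention working on
# a BracketedSequence: queries are computed for the current bracket
# only, while keys and values are accumulated in caches covering all
# the time steps processed so far.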


class QKVAttention(nn.Module):
    def __init__(
        self, dim_in, dim_qk, dim_v, nb_heads=1, causal=False, attention_dropout=0.0
    ):
        super().__init__()

        def randw(*d):
            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))

        self.causal = causal
        self.attention_dropout = attention_dropout

        # one projection matrix per head for Q, K and V, and a single
        # output projection from the concatenated heads
        self.w_q = randw(nb_heads, dim_qk, dim_in)
        self.w_k = randw(nb_heads, dim_qk, dim_in)
        self.w_v = randw(nb_heads, dim_v, dim_in)
        self.w_o = randw(dim_v * nb_heads, dim_in)

    def forward(self, bs_q):
        x_q = bs_q.x

        if bs_q.first == 0:
            self.cache_k = x_q.new_zeros(
                x_q.size(0), self.w_k.size(0), x_q.size(1), self.w_k.size(1)
            )
            self.cache_v = x_q.new_zeros(
                x_q.size(0), self.w_v.size(0), x_q.size(1), self.w_v.size(1)
            )
            self.cache_y = x_q.new_zeros(x_q.size(0), x_q.size(1), self.w_o.size(1))

        q = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_q
        )
        self.cache_k[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_k
        )
        self.cache_v[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
            "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_v
        )

        a = torch.einsum(
            "nhtd,nhsd->nhts", q, self.cache_k[:, :, : bs_q.first + bs_q.nb]
        ) / math.sqrt(self.w_q.size(1))

        if self.causal:
            if bs_q.first == 0:
                self.cache_attzero = (
                    torch.arange(x_q.size(1), device=q.device)[None, None, :, None]
                    < torch.arange(x_q.size(1), device=q.device)[None, None, None, :]
                )
            a = a.masked_fill(
                self.cache_attzero[
                    :, :, bs_q.first : bs_q.first + bs_q.nb, : bs_q.first + bs_q.nb
                ],
                float("-inf"),
            )

        a = a.softmax(dim=3)
        a = F.dropout(a, self.attention_dropout, self.training)

        y = torch.einsum(
            "nhts,nhsd->nthd", a, self.cache_v[:, :, : bs_q.first + bs_q.nb]
        ).flatten(2)

        self.cache_y[:, bs_q.first : bs_q.first + bs_q.nb] = y @ self.w_o

        bs_q.x = self.cache_y

        return bs_q


##############################
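
# MyGPT stacks a token embedding, the additive positional encoding,
# nb_blocks blocks each made of a (LayerNorm, attention) residual
# branch followed by a (LayerNorm, two-layer MLP) residual branch, and
# a final linear readout to the vocabulary.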


class MyGPT(nn.Module):
    def __init__(
        self, vocabulary_size, dim_model, dim_keys, dim_hidden,
        nb_heads, nb_blocks, causal=False, dropout=0.0, len_max=1e5,
    ):
        super().__init__()

        assert dim_model % nb_heads == 0

        self.embedding = CacheWrapper(
            nn.Embedding(vocabulary_size, dim_model), nn.Dropout(dropout)
        )

        self.pe = AddPositionalEncoding(len_max)

        trunk_blocks = []

        for b in range(nb_blocks):
            trunk_blocks += [
                WithResidual(
                    CacheWrapper(nn.LayerNorm((dim_model,))),
                    QKVAttention(
                        dim_in=dim_model,
                        dim_qk=dim_keys,
                        dim_v=dim_model // nb_heads,
                        nb_heads=nb_heads,
                        causal=causal,
                        attention_dropout=dropout,
                    ),
                ),
                WithResidual(
                    CacheWrapper(
                        nn.LayerNorm((dim_model,)),
                        nn.Linear(in_features=dim_model, out_features=dim_hidden),
                        nn.ReLU(),
                        nn.Linear(in_features=dim_hidden, out_features=dim_model),
                        nn.Dropout(dropout),
                    ),
                ),
            ]

        self.trunk = nn.Sequential(*trunk_blocks)

        self.readout = CacheWrapper(
            nn.Linear(in_features=dim_model, out_features=vocabulary_size)
        )

        with torch.no_grad():
            for m in self.modules():
                if isinstance(m, nn.Embedding):
                    m.weight.normal_(mean=0, std=2e-2)
                elif isinstance(m, nn.LayerNorm):
                    m.bias.zero_()
                    m.weight.fill_(1.0)

    def forward(self, bs, mode="standard", order=None):
        # shift the input one position to the right, so that the output
        # at time t is a prediction of the token at time t given the
        # tokens strictly before it
        bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
        if order is not None:
            order = F.pad(order + 1, (1, -1))
        bs = self.embedding(bs)
        bs = self.pe(bs, order)

        if mode == "standard":
            bs = self.trunk(bs)
            bs = self.readout(bs)
        else:
            # otherwise return the activations of all the trunk blocks,
            # concatenated along the feature dimension, instead of the
            # readout
            r = []
            for m in self.trunk:
                bs = m(bs)
                r.append(bs.x)
            bs = BracketedSequence(torch.cat(r, -1))

        return bs


######################################################################

if __name__ == "__main__":
    print("Basic check.")

    # arbitrary small vocabulary and model dimensions for the check
    vocabulary_size = 10

    x = torch.randint(vocabulary_size, (9, 7))

    model = MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=16,
        dim_keys=8,
        dim_hidden=32,
        nb_heads=2,
        nb_blocks=2,
        causal=True,
        dropout=0.1,
    )

    # evaluation mode, so that dropout does not make the two runs differ
    model.eval()

    y1 = model(BracketedSequence(x)).x

    y2 = torch.randn_like(y1)
    for s in range(x.size(1)):
        z = model(BracketedSequence(x, s, 1))
        y2[:, s : s + 1] = z.slice()

    # print(y1.max(dim = 2).values)
    # print(y2.max(dim = 2).values)

    print(f"error={((y1 - y2).norm() / (y1.norm() + y2.norm())).item()}")
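
    # A minimal sketch of cached greedy decoding with the bracket
    # protocol: process the first nb_given tokens as a single prompt
    # bracket, then extend the sequence with brackets of length 1,
    # keeping the most likely token at each step. nb_given is an
    # arbitrary prompt length chosen for this sketch.
    nb_given = 3
    u = x.clone()
    model(BracketedSequence(u, 0, nb_given))
    for s in range(nb_given, u.size(1)):
        logits = model(BracketedSequence(u, s, 1)).x[:, s]
        u[:, s] = logits.argmax(-1)
    print(f"greedy completions after {nb_given} given tokens:")
    print(u)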

######################################################################