Update.
diff --git a/mygpt.py b/mygpt.py
index de69a75..95e5527 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers to replace the MHA
+
 import math, warnings
 
 import torch, einops
@@ -37,7 +39,7 @@ import ffutils
 # 1 for the successive tokens.
 #
 # Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# resetted when init_cache is True
 
 
 class BracketedSequence:
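
The updated comment replaces a positional convention (reset when the bracket starts at t=0) with an explicit init_cache flag. A minimal runnable sketch of that convention, with assumed names that are not from mygpt.py:

    import torch

    class CachedToy:
        def __init__(self):
            self.cache = None

        def forward(self, x, init_cache=False):
            # The caller decides when to reset, instead of the module
            # testing whether the input bracket starts at t=0.
            if init_cache or self.cache is None:
                self.cache = torch.zeros_like(x)
            self.cache = self.cache + x  # accumulate across bracketed calls
            return self.cache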
@@ -481,8 +483,8 @@ class Caterpillar(nn.Module):
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        warnings.warn("flash back", RuntimeWarning)
-        self.proba_flashback = 0.1
+        self.proba_flashback = 0.0
+        self.proba_gate_dropout = 0.0
 
         self.w_G = randw(nb_heads, caterpillar_height, dim_model)
         self.b_G = nn.Parameter(
@@ -551,7 +553,11 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        # That bas a bad idea
+        if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate dropout", RuntimeWarning)
+            epsilon = 0.5
+
+        # That was a bad idea
         # G = F.dropout(G, self.attention_dropout, self.training)
 
         V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
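
The new proba_gate_dropout branch is a stub: it only emits a warning and binds epsilon = 0.5. Purely as an assumption about what such a mechanism could be, since the commit leaves it unimplemented, a standard dropout applied to the gates G might look like:

    import torch

    def gate_dropout(G, epsilon=0.5, training=True):
        # Hypothetical sketch, not the author's implementation: zero each
        # gate entry with probability epsilon, rescaling survivors so the
        # expected value of G is unchanged.
        if not training or epsilon <= 0.0:
            return G
        keep = (torch.rand_like(G) >= epsilon).to(G.dtype)
        return G * keep / (1.0 - epsilon)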
@@ -559,6 +565,10 @@ class Caterpillar(nn.Module):
         # We prepare the arguments for the parallel scan
 
+        # Clip the gating
+        warnings.warn("gating clipping", RuntimeWarning)
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
+
         A = 1 - G.sum(1)
 
         gated_V = torch.einsum("nhet,nhtd->netd", G, V)
         gated_K = torch.einsum("nhet,nhtd->netd", G, K)
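
The added clipping rescales the gates only at positions where they sum above 1 over the head axis (dim 1 of the N x H x E x T tensor G), which keeps A = 1 - G.sum(1) non-negative. A standalone check with toy values:

    import torch

    # N=1, H=2, E=1, T=1; the two head gates sum to 1.7 > 1
    G = torch.tensor([[[[0.9]], [[0.8]]]])
    G = G / G.sum(1, keepdim=True).clamp(min=1)
    print(G.sum(1))  # tensor([[[1.]]]), so A = 1 - G.sum(1) is 0, not negative

Sums already at or below 1 are left untouched, since clamp(min=1) makes the divisor exactly 1 in that case.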
@@ -585,6 +595,7 @@ class Caterpillar(nn.Module):
         self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
 
         if self.training and self.proba_flashback > 0.0:
+            warnings.warn("flash back", RuntimeWarning)
             # This piece of code makes the assumption that there is
             # nothing informative before t0, otherwise we'd have to
             # implement a cache for V and K too. This should not be
@@ -603,20 +614,18 @@ class Caterpillar(nn.Module):
             src_time = t - u - t0
             src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
 
-            mask_V = (
+            mask = (
                 torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
             ).long()
+
             self.rec_V[:, :, t0:t1] = (
-                mask_V * V[n, src_head, src_time, dv]
-                + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+                mask * V[n, src_head, src_time, dv]
+                + (1 - mask) * self.rec_V[:, :, t0:t1]
             )
-            mask_K = (
-                torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback
-            ).long()
+
             self.rec_K[:, :, t0:t1] = (
-                mask_K * K[n, src_head, src_time, dk]
-                + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+                mask * K[n, src_head, src_time, dk]
+                + (1 - mask) * self.rec_K[:, :, t0:t1]
             )
 
 ######################################################################
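
The rewrite also deduplicates the masking: a single Bernoulli mask is sampled once and reused for both rec_V and rec_K, instead of sampling mask_V and mask_K independently. The mixing itself, in isolation (toy shapes, not the commit's tensors):

    import torch

    proba_flashback = 0.1
    old = torch.zeros(4, 8)  # stands in for self.rec_V[:, :, t0:t1]
    new = torch.ones(4, 8)   # stands in for V[n, src_head, src_time, dv]
    mask = (torch.rand(4, 8) <= proba_flashback).long()
    # Entries where mask == 1 take the flashback value, the rest keep
    # their recurrent value.
    mixed = mask * new + (1 - mask) * old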
@@ -765,7 +774,6 @@ class MyGPT(nn.Module):
         nb_blocks,
         nb_lines=None,
         caterpillar_height=None,
-        dim_rec_v=-1,
         causal=False,
         dropout=0.0,
         len_max=1e5,
@@ -773,7 +781,12 @@ class MyGPT(nn.Module):
     ):
         super().__init__()
 
-        assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+        assert attention_layer in {
+            "mha",
+            "dumbrec",
+            "kvrec",
+            "caterpillar",
+        }, f"Unknown attention operator {attention_layer}."
 
         if attention_layer == "caterpillar":
             assert nb_lines % caterpillar_height == 0
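
The reformatted assertion now reports which operator was rejected. Its effect, reproduced outside the class with a deliberately unsupported value:

    attention_layer = "mamba"  # any value outside the supported set
    assert attention_layer in {
        "mha",
        "dumbrec",
        "kvrec",
        "caterpillar",
    }, f"Unknown attention operator {attention_layer}."
    # AssertionError: Unknown attention operator mamba.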
@@ -806,7 +819,7 @@ class MyGPT(nn.Module):
             return DumbRec(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 nb_lines=nb_lines,
                 attention_dropout=dropout,
@@ -815,7 +828,7 @@ class MyGPT(nn.Module):
             return KVRec(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 nb_lines=nb_lines,
                 attention_dropout=dropout,
@@ -824,7 +837,7 @@ class MyGPT(nn.Module):
             return Caterpillar(
                 dim_model=dim_model,
                 dim_qk=dim_keys,
-                dim_v=dim_rec_v,
+                dim_v=dim_model // nb_heads,
                 nb_heads=nb_heads,
                 caterpillar_length=self.caterpillar_length,
                 caterpillar_height=self.caterpillar_height,
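
The last three hunks apply the same substitution to DumbRec, KVRec, and Caterpillar: the removed dim_rec_v constructor argument gives way to the usual per-head width. With toy numbers (not from the diff):

    dim_model, nb_heads = 512, 8
    dim_v = dim_model // nb_heads  # 64, the standard multi-head value width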