        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
        k = j%2
        pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi/2 * k)
-       return x + pe # Let broadcasting to its job
+       return x + pe
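
Aside, not part of the patch: the single sin call above, with the pi/2 phase added on odd dimensions, reproduces the usual sin/cos pair of Vaswani et al. A minimal sanity check of that identity; the sequence length, model dimension, and len_max below are arbitrary assumptions, not values fixed by this diff:

import math, torch

T, D, len_max = 16, 64, 1e5                       # assumed values
t = torch.arange(T, dtype = torch.float64)[:, None]
j = torch.arange(D, dtype = torch.float64)[None, :]
k = j % 2
pe = torch.sin(t / (len_max ** ((j - k) / D)) + math.pi / 2 * k)

# Textbook form: sin on even dimensions, cos on odd ones, same frequency per pair.
ref = torch.empty(T, D, dtype = torch.float64)
ref[:, 0::2] = torch.sin(t / (len_max ** (j[:, 0::2] / D)))
ref[:, 1::2] = torch.cos(t / (len_max ** ((j[:, 1::2] - 1) / D)))

assert torch.allclose(pe, ref)
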
##############################
class QKVAttention(nn.Module):
-   def __init__(
-       self,
-       dim_in, dim_qk, dim_v,
-       nb_heads = 1, causal = False, attention_dropout = 0.0
-   ):
+   def __init__(self,
+                dim_in, dim_qk, dim_v,
+                nb_heads = 1, causal = False, attention_dropout = 0.0):
        super().__init__()
        def randw(*d):
    def __init__(self,
                 vocabulary_size,
                 dim_model, dim_keys, dim_hidden,
-                nb_heads, nb_blocks, dropout = 0.):
+                nb_heads, nb_blocks,
+                dropout = 0.0, len_max = 1e5):
        super().__init__()
        self.embedding = nn.Sequential(
            nn.Embedding(vocabulary_size, dim_model),
            nn.Dropout(dropout),
-           PositionalEncoding(len_max = 1e5),
+           PositionalEncoding(len_max),
        )
        trunk_blocks = [ ]