##############################
-class PositionalEncoding(nn.Module):
+class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max
-    # From Vaswani et al 2018
-    # PE_{t,2i} = sin(t/(L^{2i/D}))
-    # PE_{t,2i+1} = cos(t/(L^{2i/D}))
+    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
    def forward(self, x):
        t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
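        # The hunk above omits the rest of forward; a minimal sketch of one way
        # to realize the comment's formula (an assumption, not the file's actual
        # code), assuming x has shape (N, T, D) and that math is imported: fold
        # sin and cos into a single sin by adding a pi/2 phase on odd feature
        # indices, then add the encoding to the input.
        k = j % 2                          # 1 on odd feature indices, 0 on even
        pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi / 2 * k)
        return x + pe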
        self.embedding = nn.Sequential(
            nn.Embedding(vocabulary_size, dim_model),
            nn.Dropout(dropout),
-            PositionalEncoding(len_max),
+            AddPositionalEncoding(len_max),
        )
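        # A hedged usage sketch (batch_size and seq_len are illustrative names,
        # not from the diff): self.embedding maps integer token ids of shape
        # (batch_size, seq_len) to activations of shape
        # (batch_size, seq_len, dim_model), applying dropout to the embeddings
        # before the positional signal is added.
        #
        #     tokens = torch.randint(vocabulary_size, (batch_size, seq_len))
        #     x = self.embedding(tokens)  # (batch_size, seq_len, dim_model)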
        trunk_blocks = [ ]