Added args.learning_rate_end for an exponential decay.
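This page shows only the mygpt.py hunk, so the schedule itself is not visible here. A minimal sketch of what an exponential decay toward args.learning_rate_end could look like, assuming args.learning_rate holds the starting value and args.nb_epochs the number of epochs (both names are assumptions, not taken from this diff):

    # Hypothetical sketch, not the repository's actual code: geometric
    # interpolation from args.learning_rate at epoch 0 down to
    # args.learning_rate_end at the last epoch.
    def learning_rate_for(args, epoch):
        ratio = args.learning_rate_end / args.learning_rate
        return args.learning_rate * ratio ** (epoch / max(1, args.nb_epochs - 1))

    # e.g. learning_rate = 1e-3, learning_rate_end = 1e-5, nb_epochs = 11:
    # the rate shrinks by a factor of 10 ** 0.2 ~= 1.58 each epoch.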
index 7ff1035..ebc9a83 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -124,6 +124,16 @@ class MyGPT(nn.Module):
 
         self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)
 
+        # Initialize embedding weights with a small standard deviation and
+        # reset every LayerNorm to the identity map (zero bias, unit gain).
+        with torch.no_grad():
+            for m in self.modules():
+                if isinstance(m, nn.Embedding):
+                    m.weight.normal_(mean = 0, std = 2e-2)
+                elif isinstance(m, nn.LayerNorm):
+                    m.bias.zero_()
+                    m.weight.fill_(1.0)
+
     def forward(self, x):
         x = F.pad(x, (1, -1))
         x = self.embedding(x)
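In the unchanged forward pass above, F.pad(x, (1, -1)) shifts each token sequence one position to the right along the last dimension: a 0 is inserted at the front and the final token is dropped, so the prediction at position t can only depend on tokens strictly before t. A small standalone check of that padding behavior:

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[5, 6, 7, 8]])
    # (1, -1): pad one element on the left of the last dim, trim one on the right.
    print(F.pad(x, (1, -1)))  # tensor([[0, 5, 6, 7]])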