Update.
diff --git a/lazy_linear.py b/lazy_linear.py
index 7c9e398..b3d3165 100755 (executable)
--- a/lazy_linear.py
+++ b/lazy_linear.py
@@ -1,8 +1,8 @@
-#!/usr/bin/env python-for-pytorch
+#!/usr/bin/env python
 
 from torch import nn, Tensor
 
-##########
+######################################################################
 
 class LazyLinear(nn.Module):
@@ -23,16 +23,25 @@ class LazyLinear(nn.Module):
         return self.core(x)
 
-##########
-
-model = nn.Sequential(nn.Conv2d(1, 8, kernel_size = 5),
-                      nn.ReLU(inplace = True),
-                      LazyLinear(128),
-                      nn.ReLU(inplace = True),
-                      nn.Linear(128, 10))
-
-# model.eval()
-
-input = Tensor(100, 1, 32, 32).normal_()
-
-output = model(input)
+    def named_parameters(self, memo=None, prefix=''):
+        assert self.core is not None, 'Parameters not yet defined'
+        return super(LazyLinear, self).named_parameters(memo, prefix)
+
+######################################################################
+
+if __name__ == "__main__":
+
+    model = nn.Sequential(nn.Conv2d(3, 8, kernel_size = 5),
+                          nn.ReLU(inplace = True),
+                          LazyLinear(128),
+                          nn.ReLU(inplace = True),
+                          nn.Linear(128, 10))
+
+    # model.eval()
+
+    input = Tensor(100, 3, 32, 32).normal_()
+
+    output = model(input)
+
+    for n, x in model.named_parameters():
+        print(n, x.size())
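
For context, the hunks above show only that forward() returns self.core(x) and that the new named_parameters() override refuses to run before self.core exists; they do not show how self.core is created. The sketch below illustrates the lazy-initialization pattern this file relies on. It is a minimal sketch, not the code in lazy_linear.py: the class name LazyLinearSketch, the constructor, and the flattening in forward() are assumptions, and named_parameters() is written against the current nn.Module signature (prefix, recurse) rather than the older (memo, prefix) form used in the diff.

# Minimal sketch of a lazily sized linear layer (hypothetical class name,
# not the implementation in lazy_linear.py).

import torch
from torch import nn

class LazyLinearSketch(nn.Module):
    def __init__(self, out_features):
        super().__init__()
        self.out_features = out_features
        self.core = None  # the real nn.Linear is created on the first forward pass

    def forward(self, x):
        # Flatten everything but the batch dimension (assumed behaviour).
        x = x.reshape(x.size(0), -1)
        if self.core is None:
            # Size the linear layer from the number of features actually observed.
            self.core = nn.Linear(x.size(1), self.out_features)
        return self.core(x)

    def named_parameters(self, prefix='', recurse=True):
        # Before the first forward pass there are no parameters to report,
        # so fail loudly instead of silently yielding nothing.
        assert self.core is not None, 'Parameters not yet defined'
        return super().named_parameters(prefix=prefix, recurse=recurse)

if __name__ == "__main__":
    model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=5),
                          nn.ReLU(inplace=True),
                          LazyLinearSketch(128),
                          nn.ReLU(inplace=True),
                          nn.Linear(128, 10))

    x = torch.randn(100, 3, 32, 32)
    y = model(x)  # the first call creates the lazy layer's parameters

    for name, p in model.named_parameters():
        print(name, p.size())

In this sketch, the Conv2d(3, 8, kernel_size=5) front end turns 32x32 inputs into 28x28 feature maps, so the lazy layer sizes itself to 8 * 28 * 28 = 6272 input features on the first call; calling the lazy module's named_parameters() before that first forward pass trips the assertion, which is the failure mode the guard added in this commit is meant to surface.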