#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
from torch import nn, Tensor
######################################################################
class LazyLinear(nn.Module):
    """A linear layer whose input width is inferred at first use.

    The wrapped ``nn.Linear`` is created lazily on the first forward pass
    executed in training mode, sized from the (flattened) input batch.
    Running the module first in inference mode raises, since the weights
    would be undefined.
    """

    def __init__(self, out_dim, bias=True):
        super().__init__()
        self.out_dim = out_dim  # output feature count of the lazy core
        self.bias = bias        # forwarded to nn.Linear when it is created
        self.core = None        # created on first training-mode forward

    def forward(self, x):
        # NOTE(review): the diff this file was recovered from omitted the
        # unchanged context lines of forward(); the flattening and the
        # `core is None` guard below are reconstructed from the visible
        # branch logic — confirm against the original file.
        x = x.view(x.size(0), -1)  # flatten everything but the batch dim
        if self.core is None:
            if self.training:
                self.core = nn.Linear(x.size(1), self.out_dim, self.bias)
            else:
                raise RuntimeError("Undefined LazyLinear core in inference mode.")
        return self.core(x)

    def named_parameters(self, memo=None, prefix=""):
        # Refuse to enumerate parameters before the core exists.
        # NOTE(review): this signature matches an older torch API; recent
        # torch uses named_parameters(prefix='', recurse=True) — verify
        # before calling this override directly.
        assert self.core is not None, "Parameters not yet defined"
        return super().named_parameters(memo, prefix)


######################################################################

if __name__ == "__main__":
    # Small demo: a conv feature extractor feeding the lazily-sized
    # linear layer, which infers its input width from the first batch.
    model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=5),
        nn.ReLU(inplace=True),
        LazyLinear(128),
        nn.ReLU(inplace=True),
        nn.Linear(128, 10),
    )

    # Uncommenting this before the first forward would trigger the
    # "Undefined LazyLinear core in inference mode" error on purpose.
    # model.eval()

    batch = Tensor(100, 3, 32, 32).normal_()
    logits = model(batch)

    # After one training-mode forward the lazy core exists, so all
    # parameters can be enumerated.
    for n, x in model.named_parameters():
        print(n, x.size())