From: Francois Fleuret
Date: Sat, 9 Jun 2018 12:11:24 +0000 (+0200)
Subject: Update.
X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=commitdiff_plain;h=0a30c677bfb0aac1f9eebc9646728519ca72bf5b;p=pytorch.git

Update.
---

diff --git a/ae_size.py b/ae_size.py
index 7bef9f5..7183937 100755
--- a/ae_size.py
+++ b/ae_size.py
@@ -18,20 +18,26 @@ def minimal_input_size(w, layer_specs):
 
 ######################################################################
 
-layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]
+# Dummy test
 
-layers = []
-for l in layer_specs:
-    layers.append(nn.Conv2d(1, 1, l[0], l[1]))
+if __name__ == "__main__":
 
-for l in reversed(layer_specs):
-    layers.append(nn.ConvTranspose2d(1, 1, l[0], l[1]))
+    layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]
 
-m = nn.Sequential(*layers)
+    layers = []
+    for l in layer_specs:
+        layers.append(nn.Conv2d(1, 1, l[0], l[1]))
 
-h = minimal_input_size(240, layer_specs)
-w = minimal_input_size(320, layer_specs)
+    for l in reversed(layer_specs):
+        layers.append(nn.ConvTranspose2d(1, 1, l[0], l[1]))
 
-x = Tensor(1, 1, h, w).normal_()
+    m = nn.Sequential(*layers)
 
-print(x.size(), m(x).size())
+    h = minimal_input_size(240, layer_specs)
+    w = minimal_input_size(320, layer_specs)
+
+    x = Tensor(1, 1, h, w).normal_()
+
+    print(x.size(), m(x).size())
+
+######################################################################
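
For context: the hunk above only moves the size check under an "if __name__ == '__main__':" guard; the body of minimal_input_size() sits outside the hunk and is not shown. The sketch below is a self-contained approximation of what the test exercises, assuming minimal_input_size(w, layer_specs) returns the smallest size >= w that a padding-free Conv2d stack followed by the mirrored ConvTranspose2d stack maps back to itself. The recursion is an assumption for illustration, not the repository's actual implementation.

import torch
from torch import nn


def minimal_input_size(w, layer_specs):
    # Assumed behaviour (hypothetical, the real body is not in the diff):
    # smallest size >= w such that every Conv2d consumes its input exactly,
    # so the mirrored ConvTranspose2d stack restores the original size.
    if not layer_specs:
        return w
    k, s = layer_specs[0]
    # Smallest conv output v with (v - 1) * s + k >= w, i.e. ceil((w - k) / s) + 1.
    v = (w - k + s - 1) // s + 1
    # Make v itself feasible for the remaining layers.
    v = minimal_input_size(v, layer_specs[1:])
    # Input size that the matching ConvTranspose2d reproduces exactly.
    return (v - 1) * s + k


if __name__ == "__main__":
    layer_specs = [(11, 5), (5, 2), (3, 2), (3, 2)]

    layers = [nn.Conv2d(1, 1, k, s) for k, s in layer_specs]
    layers += [nn.ConvTranspose2d(1, 1, k, s) for k, s in reversed(layer_specs)]
    m = nn.Sequential(*layers)

    h = minimal_input_size(240, layer_specs)
    w = minimal_input_size(320, layer_specs)

    x = torch.empty(1, 1, h, w).normal_()
    print(x.size(), m(x).size())

Run under this assumption, both printed sizes come out identical (torch.Size([1, 1, 251, 331]) for the requested 240x320), which is the property the test checks.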