X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?p=pytorch.git;a=blobdiff_plain;f=ae_size.py;h=067a7fa7bc1d44f4330e7f37fe1e511d3fd92045;hp=7bef9f507f8eb6ff15bbd81b40e0ad4ee2e2d927;hb=HEAD;hpb=2db4624955ad2a1c29f7632f30ac217c045638cf

diff --git a/ae_size.py b/ae_size.py
index 7bef9f5..49f4a20 100755
--- a/ae_size.py
+++ b/ae_size.py
@@ -1,37 +1,50 @@
 #!/usr/bin/env python
 
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret
+
 import math
 from torch import nn
 from torch import Tensor
 
 ######################################################################
 
+
 def minimal_input_size(w, layer_specs):
-    assert w > 0, 'The input is too small'
+    assert w > 0, "The input is too small"
     if layer_specs == []:
         return w
     else:
-        k, s = layer_specs[0]
-        w = math.ceil((w - k) / s) + 1
-        w = minimal_input_size(w, layer_specs[1:])
-        return int((w - 1) * s + k)
+        kernel_size, stride = layer_specs[0]
+        v = int(math.ceil((w - kernel_size) / stride)) + 1
+        v = minimal_input_size(v, layer_specs[1:])
+        return (v - 1) * stride + kernel_size
+
 
 ######################################################################
 
-layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]
+# Dummy test
 
-layers = []
-for l in layer_specs:
-    layers.append(nn.Conv2d(1, 1, l[0], l[1]))
+if __name__ == "__main__":
+    layer_specs = [(17, 5), (5, 4), (3, 2), (3, 2)]
 
-for l in reversed(layer_specs):
-    layers.append(nn.ConvTranspose2d(1, 1, l[0], l[1]))
+    layers = []
 
-m = nn.Sequential(*layers)
+    for kernel_size, stride in layer_specs:
+        layers.append(nn.Conv2d(1, 1, kernel_size, stride))
 
-h = minimal_input_size(240, layer_specs)
-w = minimal_input_size(320, layer_specs)
+    for kernel_size, stride in reversed(layer_specs):
+        layers.append(nn.ConvTranspose2d(1, 1, kernel_size, stride))
 
-x = Tensor(1, 1, h, w).normal_()
+    m = nn.Sequential(*layers)
 
-print(x.size(), m(x).size())
+    h = minimal_input_size(240, layer_specs)
+    w = minimal_input_size(320, layer_specs)
+
+    x = Tensor(1, 1, h, w).normal_()
+
+    print(x.size(), m(x).size())
+
+######################################################################
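
Note (not part of the commit): the property minimal_input_size targets can be checked with the closed-form size formulas for unpadded, undilated Conv2d and ConvTranspose2d, namely out = floor((in - k) / s) + 1 for the convolution and out = (in - 1) * s + k for its transpose. The sketch below is an illustration under those assumptions; round_trip is a hypothetical helper introduced here, and importing ae_size assumes the script sits next to it with torch installed.

# Sketch only: check that the size returned by minimal_input_size is
# reproduced exactly after going through the conv stack and back through
# the mirrored transposed-conv stack.

from ae_size import minimal_input_size


def round_trip(w, layer_specs):
    # Forward through the convolutions: out = floor((in - k) / s) + 1
    for k, s in layer_specs:
        w = (w - k) // s + 1
    # Backward through the transposed convolutions: out = (in - 1) * s + k
    for k, s in reversed(layer_specs):
        w = (w - 1) * s + k
    return w


layer_specs = [(17, 5), (5, 4), (3, 2), (3, 2)]

h = minimal_input_size(240, layer_specs)  # 317 for these specs, with Python 3's true division
assert round_trip(h, layer_specs) == h    # the autoencoder preserves this spatial size
print(h, round_trip(h, layer_specs))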