X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?p=flatland.git;a=blobdiff_plain;f=test.py;h=ac3eb7e2506e809d640658094587e1df3a61db95;hp=de408aa994dc3224e1949900e89567bf9af48c66;hb=7e2780052c458a329fc36253b83eb5194ffb7fed;hpb=2cd32038873961c8ff3861efb218fad75fbcbf69

diff --git a/test.py b/test.py
index de408aa..ac3eb7e 100755
--- a/test.py
+++ b/test.py
@@ -2,19 +2,93 @@
 
 import torch
 import torchvision
-from torchvision import datasets
+import argparse
 
-from _ext import mylib
+from _ext import flatland
 
-x = torch.ByteTensor(4, 5).fill_(0)
+######################################################################
 
-print(x.size())
+parser = argparse.ArgumentParser(
+    description='Dummy test of the flatland sequence generation.',
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter
+)
 
-mylib.generate_sequence(8, x)
+parser.add_argument('--seed',
+                    type = int, default = 0,
+                    help = 'Random seed, < 0 is no seeding')
 
-print(x.size())
+parser.add_argument('--width',
+                    type = int, default = 80,
+                    help = 'Image width')
 
-x = x.float().sub_(128).div_(128)
+parser.add_argument('--height',
+                    type = int, default = 80,
+                    help = 'Image height')
 
-for s in range(0, x.size(0)):
-    torchvision.utils.save_image(x[s], 'example_' + str(s) + '.png')
+parser.add_argument('--nb_shapes',
+                    type = int, default = 10,
+                    help = 'Number of shapes')
+
+parser.add_argument('--nb_sequences',
+                    type = int, default = 1,
+                    help = 'How many sequences to generate')
+
+parser.add_argument('--nb_images_per_sequences',
+                    type = int, default = 3,
+                    help = 'How many images per sequence')
+
+parser.add_argument('--randomize_colors',
+                    action='store_true', default=False,
+                    help = 'Should the shapes be of different colors')
+
+parser.add_argument('--randomize_shape_size',
+                    action='store_true', default=False,
+                    help = 'Should the shapes be of different size')
+
+args = parser.parse_args()
+
+if args.seed >= 0:
+    torch.manual_seed(args.seed)
+
+######################################################################
+
+def sequences_to_image(x, gap = 1, gap_color = (0, 128, 255)):
+    from PIL import Image
+
+    nb_sequences = x.size(0)
+    nb_images_per_sequences = x.size(1)
+    nb_channels = 3
+
+    if x.size(2) != nb_channels:
+        print('Can only handle 3 channel tensors.')
+        exit(1)
+
+    height = x.size(3)
+    width = x.size(4)
+
+    result = torch.ByteTensor(nb_channels,
+                              gap + nb_sequences * (height + gap),
+                              gap + nb_images_per_sequences * (width + gap))
+
+    result.copy_(torch.Tensor(gap_color).view(-1, 1, 1).expand_as(result))
+
+    for s in range(0, nb_sequences):
+        for i in range(0, nb_images_per_sequences):
+            result.narrow(1, gap + s * (height + gap), height) \
+                  .narrow(2, gap + i * (width + gap), width) \
+                  .copy_(x[s][i])
+
+    result_numpy = result.cpu().byte().transpose(0, 2).transpose(0, 1).numpy()
+
+    return Image.fromarray(result_numpy, 'RGB')
+
+######################################################################
+
+x = flatland.generate_sequence(args.nb_sequences,
+                               args.nb_images_per_sequences,
+                               args.height, args.width,
+                               args.nb_shapes,
+                               args.randomize_colors,
+                               args.randomize_shape_size)
+
+sequences_to_image(x, gap = 1, gap_color = (0, 0, 0)).save('sequences.png')
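
A typical invocation of the updated script, using only the options defined
in the diff above (the compiled _ext.flatland extension must have been built
and be importable):

    ./test.py --nb_sequences 5 --nb_images_per_sequences 4 \
              --nb_shapes 10 --randomize_colors

This tiles every frame of every generated sequence into a single grid image,
one sequence per row, and writes it to sequences.png in the current
directory; --help prints the full option list with its defaults, courtesy of
ArgumentDefaultsHelpFormatter.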
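
To check the tiling logic in isolation, the short sketch below feeds
sequences_to_image() a random tensor of the shape it expects,
(nb_sequences, nb_images_per_sequences, 3, height, width), in place of
actual flatland.generate_sequence() output. The dummy tensor and the
output name dummy.png are illustrative stand-ins, not part of the
repository:

    import torch

    # Stand-in for flatland.generate_sequence() output: 2 sequences of
    # 3 RGB frames of size 80x80, filled with random byte values.
    x = torch.ByteTensor(2, 3, 3, 80, 80).random_(0, 256)

    # sequences_to_image() as defined in test.py above; a bright red gap
    # color makes the grid layout easy to inspect.
    sequences_to_image(x, gap = 1, gap_color = (255, 0, 0)).save('dummy.png')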