2853309befc2e6850c689af3a455df001b97cecc
[flatland.git] / test.py
1 #!/usr/bin/env python-for-pytorch
2
3 import torch
4 import torchvision
5 from torchvision import datasets
6
7 from _ext import flatland
8
9 ######################################################################
10
def sequences_to_image(x, gap=1, gap_color = (0, 128, 255)):
    """Tile a batch of RGB image sequences into a single PIL image.

    Each sequence becomes one row of tiles, each image in the sequence one
    column, separated by a ``gap``-pixel border filled with ``gap_color``.

    Args:
        x: tensor of shape (nb_sequences, nb_images_per_sequence, 3, height,
           width); byte-valued pixel data (copied as-is into a ByteTensor).
        gap: border thickness in pixels between and around tiles.
        gap_color: (R, G, B) byte values used to paint the borders.

    Returns:
        A ``PIL.Image.Image`` in 'RGB' mode.

    Raises:
        ValueError: if dim 2 of ``x`` is not exactly 3 channels.
    """
    nb_sequences = x.size(0)
    nb_images_per_sequences = x.size(1)
    nb_channels = 3

    # Validate before importing PIL: a shape error should surface as a
    # ValueError, not be masked by a missing-Pillow ImportError. Raising
    # (instead of print + exit(1)) lets callers handle the failure.
    if x.size(2) != nb_channels:
        raise ValueError('Can only handle 3 channel tensors.')

    from PIL import Image

    height = x.size(3)
    width = x.size(4)

    result = torch.ByteTensor(nb_channels,
                              gap + nb_sequences * (height + gap),
                              gap + nb_images_per_sequences * (width + gap))

    # Paint the whole canvas with the gap color; tiles are copied over it.
    for c in range(nb_channels):
        result[c].fill_(gap_color[c])

    for s in range(0, nb_sequences):
        for i in range(0, nb_images_per_sequences):
            # Select the (s, i) tile rectangle in place and copy the frame in.
            result.narrow(1, gap + s * (height + gap), height) \
                  .narrow(2, gap + i * (width + gap), width) \
                  .copy_(x[s][i])

    # CHW -> HWC, as PIL expects height x width x channel byte arrays.
    result_numpy = result.cpu().byte().transpose(0, 2).transpose(0, 1).numpy()

    return Image.fromarray(result_numpy, 'RGB')
42
43 ######################################################################
44
# Generate one 3-frame 80x80 sequence and write it out as a tiled PNG.
sequence_batch = flatland.generate_sequence(1, 3, 80, 80, True, True)

image = sequences_to_image(sequence_batch, gap = 2, gap_color = (0, 0, 0))
image.save('sequences.png')