[flatland.git] / test.py
#!/usr/bin/env python

import torch
import torchvision
from torchvision import datasets

from _ext import flatland  # compiled flatland extension module

######################################################################

def sequences_to_image(x):
    # Tile a (nb_sequences, nb_images, 3, height, width) byte tensor into a
    # single RGB image: one row per sequence, frames separated by colored gaps.
    from PIL import Image

    nb_sequences = x.size(0)
    nb_images_per_sequences = x.size(1)
    nb_channels = 3

    if x.size(2) != nb_channels:
        raise ValueError('can only handle 3-channel tensors')

    height = x.size(3)
    width = x.size(4)
    gap = 1
    gap_color = (0, 128, 255)  # RGB fill for the gaps between frames

    result = torch.ByteTensor(nb_channels,
                              gap + nb_sequences * (height + gap),
                              gap + nb_images_per_sequences * (width + gap))

    result[0].fill_(gap_color[0])
    result[1].fill_(gap_color[1])
    result[2].fill_(gap_color[2])

    for s in range(nb_sequences):
        for i in range(nb_images_per_sequences):
            # View of the cell where frame i of sequence s goes.
            cell = result.narrow(1, gap + s * (height + gap), height) \
                         .narrow(2, gap + i * (width + gap), width)
            cell.copy_(x[s][i])

    # CHW -> HWC, as PIL expects.
    result_numpy = result.cpu().byte().transpose(0, 2).transpose(0, 1).numpy()

    return Image.fromarray(result_numpy, 'RGB')

######################################################################

x = flatland.generate_sequence(5, 3, 128, 96)

sequences_to_image(x).save('sequences.png')
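######################################################################

# Alternative sketch (not part of the original script): torchvision, imported
# above, can build a similar tiled image with torchvision.utils.make_grid,
# assuming x is the byte tensor of shape
# (nb_sequences, nb_images, 3, height, width) produced above, with values in
# [0, 255]. make_grid only accepts a scalar pad_value, so the gaps come out
# gray rather than (0, 128, 255). The helper name sequences_to_grid is
# hypothetical.

from torchvision.utils import make_grid, save_image

def sequences_to_grid(x, gap=1, gap_value=0.5):
    # Flatten sequences and frames into a single batch dimension and rescale
    # the byte values to [0, 1] floats, as make_grid / save_image expect.
    frames = x.contiguous().view(-1, x.size(2), x.size(3), x.size(4)).float() / 255.0
    return make_grid(frames, nrow=x.size(1), padding=gap, pad_value=gap_value)

# Usage: save_image(sequences_to_grid(x), 'sequences_grid.png')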