[culture.git] / sky.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, tqdm, os

import torch, torchvision

from torch import nn
from torch.nn import functional as F

######################################################################

import problem


class Sky(problem.Problem):
    colors = torch.tensor(
        [
            [255, 255, 255],
            [255, 0, 0],
            [0, 192, 0],
            [0, 0, 255],
            [255, 192, 0],
            [0, 255, 255],
            [255, 0, 255],
            [192, 255, 192],
            [255, 192, 192],
            [192, 192, 255],
            [192, 192, 192],
        ]
    )

    token_background = 0
    first_bird_token = 1
    nb_bird_tokens = colors.size(0) - 1
    token_forward = first_bird_token + nb_bird_tokens
    token_backward = token_forward + 1

    token2char = (
        "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><"
    )
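    # Token vocabulary: 0 is the background, 1..10 are the bird colors
    # defined in `colors`, then come the two direction markers
    # `token_forward` (11) and `token_backward` (12). `token2char` maps
    # them to "_", "A".."J", ">" and "<" for text dumps.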

    def __init__(
        self,
        height=6,
        width=8,
        nb_birds=3,
        speed=2,
        nb_iterations=2,
        avoid_collision=True,
    ):
        self.height = height
        self.width = width
        self.nb_birds = nb_birds
        self.speed = speed
        self.nb_iterations = nb_iterations
        self.avoid_collision = avoid_collision

    def direction_tokens(self):
        return self.token_forward, self.token_backward

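    # generate_frame_sequences draws `nb` short "movies". Each bird occupies
    # three cells (its head at (i, j) plus one tail cell behind it along each
    # axis), moves diagonally with velocity (vi, vj) in {-1, +1}^2, and
    # bounces off the borders of the grid. Initial states are resampled until
    # every tail cell is in bounds and, when avoid_collision is set, no two
    # birds overlap; a sequence is re-drawn from scratch whenever its final
    # frame still contains a collision.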
    def generate_frame_sequences(self, nb):
        frame_sequences = []

        for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
            i, j, vi, vj = (
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
            )

            def collision_okay():
                # True iff no cell is occupied by more than one bird
                # (always True when collision avoidance is disabled).
                if not self.avoid_collision:
                    return True

                count = torch.zeros(self.height, self.width, dtype=torch.int64)

                for n in range(self.nb_birds):
                    count[i[n], j[n]] += 1
                    count[i[n] - vi[n], j[n]] += 1
                    count[i[n], j[n] - vj[n]] += 1

                return count.max() <= 1

            # Distinct, sorted color indices, one per bird.
            col = (
                torch.randperm(self.colors.size(0) - 1)[: self.nb_birds].sort().values
                + 1
            )

            while True:
                # Sample initial positions and velocities until the tails are
                # in bounds and the configuration is collision-free.
                while True:
                    for n in range(self.nb_birds):
                        while True:
                            i[n] = torch.randint(self.height, (1,))
                            j[n] = torch.randint(self.width, (1,))
                            vm = torch.randint(4, (1,))
                            vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
                            if (
                                i[n] - vi[n] >= 0
                                and i[n] - vi[n] < self.height
                                and j[n] - vj[n] >= 0
                                and j[n] - vj[n] < self.width
                            ):
                                break

                    if collision_okay():
                        break

                result = torch.zeros(
                    self.nb_iterations, self.height, self.width, dtype=torch.int64
                )

                for l in range(self.nb_iterations):
                    fine = collision_okay()
                    for n in range(self.nb_birds):
                        c = col[n]
                        result[l, i[n], j[n]] = c
                        result[l, i[n] - vi[n], j[n]] = c
                        result[l, i[n], j[n] - vj[n]] = c

                        # Bounce on the borders, then move.
                        if (i[n] == 0 and vi[n] == -1) or (
                            i[n] == self.height - 1 and vi[n] == 1
                        ):
                            vi[n] = -vi[n]

                        if (j[n] == 0 and vj[n] == -1) or (
                            j[n] == self.width - 1 and vj[n] == 1
                        ):
                            vj[n] = -vj[n]

                        i[n] += vi[n]
                        j[n] += vj[n]

                # Reject the whole sequence if the last frame has a collision.
                if fine:
                    break

            frame_sequences.append(result)

        return frame_sequences

    ######################################################################

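    # generate_token_sequences flattens each frame sequence into a single 1D
    # token sequence. With probability 1/2 the frames are kept in temporal
    # order and separated by token_forward; otherwise they are reversed and
    # separated by token_backward, so the separator encodes whether time runs
    # forward or backward.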
    def generate_token_sequences(self, nb):
        frame_sequences = self.generate_frame_sequences(nb)

        result = []

        for frame_sequence in frame_sequences:
            a = []
            if torch.rand(1) < 0.5:
                for frame in frame_sequence:
                    if len(a) > 0:
                        a.append(torch.tensor([self.token_forward]))
                    a.append(frame.flatten())
            else:
                for frame in reversed(frame_sequence):
                    if len(a) > 0:
                        a.append(torch.tensor([self.token_backward]))
                    a.append(frame.flatten())

            result.append(torch.cat(a, dim=0)[None, :])

        return torch.cat(result, dim=0)

    ######################################################################

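    # frame2img renders a batch of token grids as RGB images: each cell
    # becomes a scale x scale block of its color, thin black grid lines are
    # drawn between cells, and any cell holding a token that is not a
    # background or bird token is marked with a black cross.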
    def frame2img(self, x, scale=15):
        x = x.reshape(-1, self.height, self.width)
        m = torch.logical_and(
            x >= 0, x < self.first_bird_token + self.nb_bird_tokens
        ).long()
        x = self.colors[x * m].permute(0, 3, 1, 2)
        s = x.shape
        x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
        x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)

        x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
        x[:, :, torch.arange(0, x.size(2), scale), :] = 0
        x = x[:, :, 1:, 1:]

        # Draw a black cross over cells whose token is not a valid
        # background/bird token.
        for n in range(m.size(0)):
            for i in range(m.size(1)):
                for j in range(m.size(2)):
                    if m[n, i, j] == 0:
                        for k in range(2, scale - 2):
                            for l in [0, 1]:
                                x[n, :, i * scale + k, j * scale + k - l] = 0
                                x[
                                    n, :, i * scale + scale - 1 - k, j * scale + k - l
                                ] = 0

        return x

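    # seq2img renders a complete token sequence: successive frames are drawn
    # side by side, separated by one-pixel black columns and a small glyph
    # for the direction token found between them (a ">"-like arrow for
    # forward, "<"-like for backward, a cross for anything else).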
    def seq2img(self, seq, scale=15):
        all = [
            self.frame2img(
                seq[:, : self.height * self.width].reshape(-1, self.height, self.width),
                scale,
            )
        ]

        separator = torch.full((seq.size(0), 3, self.height * scale - 1, 1), 0)

        t = self.height * self.width

        while t < seq.size(1):
            direction_tokens = seq[:, t]
            t += 1

            # Start from an all-white strip and draw the direction glyph on it
            # in black.
            direction_images = self.colors[
                torch.full(
                    (direction_tokens.size(0), self.height * scale - 1, scale), 0
                )
            ].permute(0, 3, 1, 2)

            for n in range(direction_tokens.size(0)):
                if direction_tokens[n] == self.token_forward:
                    for k in range(scale):
                        for l in [0, 1]:
                            direction_images[
                                n,
                                :,
                                (self.height * scale) // 2 - scale // 2 + k - l,
                                3 + scale // 2 - abs(k - scale // 2),
                            ] = 0
                elif direction_tokens[n] == self.token_backward:
                    for k in range(scale):
                        for l in [0, 1]:
                            direction_images[
                                n,
                                :,
                                (self.height * scale) // 2 - scale // 2 + k - l,
                                3 + abs(k - scale // 2),
                            ] = 0
                else:
                    # Neither direction token: draw a cross.
                    for k in range(2, scale - 2):
                        for l in [0, 1]:
                            direction_images[
                                n,
                                :,
                                (self.height * scale) // 2 - scale // 2 + k - l,
                                k,
                            ] = 0
                            direction_images[
                                n,
                                :,
                                (self.height * scale) // 2 - scale // 2 + k - l,
                                scale - 1 - k,
                            ] = 0

            all += [
                separator,
                direction_images,
                separator,
                self.frame2img(
                    seq[:, t : t + self.height * self.width].reshape(
                        -1, self.height, self.width
                    ),
                    scale,
                ),
            ]

            t += self.height * self.width

        return torch.cat(all, dim=3)

    def seq2str(self, seq):
        result = []
        for s in seq:
            result.append("".join([self.token2char[v] for v in s]))
        return result

    def save_image(self, input, result_dir, filename):
        img = self.seq2img(input.to("cpu"))
        image_name = os.path.join(result_dir, filename)
        torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=6, padding=4)

    def save_quizzes(self, input, result_dir, filename_prefix):
        self.save_image(input, result_dir, filename_prefix + ".png")


######################################################################

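# Quick smoke test: generate a small batch of token sequences, report the
# generation throughput, and render everything to a single PNG.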
if __name__ == "__main__":
    import time

    sky = Sky(height=6, width=8, speed=2, nb_iterations=2)

    start_time = time.perf_counter()
    token_sequences = sky.generate_token_sequences(nb=64)
    delay = time.perf_counter() - start_time
    print(f"{token_sequences.size(0)/delay:.02f} seq/s")

    # print(sky.seq2str(seq[:4]))

    # for t in range(len(it[0])):
    # img = torch.cat([sky.frame2img(f[t]) for f in it], dim=0)
    # torchvision.utils.save_image(
    # img.float() / 255.0,
    # f"/tmp/frame_{t:03d}.png",
    # nrow=8,
    # padding=6,
    # pad_value=0,
    # )

    # m = (torch.rand(seq.size()) < 0.05).long()
    # seq = (1 - m) * seq + m * 23

    # print(seq.size())
    img = sky.seq2img(token_sequences)
    # print(img.size())

    torchvision.utils.save_image(
        img.float() / 255.0, "/tmp/world.png", nrow=6, padding=6, pad_value=0
    )
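
    # Illustrative extras (assumes /tmp is writable): dump the first few
    # sequences as text with seq2str, and save the same batch through the
    # save_quizzes helper defined above.
    print("\n".join(sky.seq2str(token_sequences[:4])))
    sky.save_quizzes(token_sequences, "/tmp", "sky_quizzes")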