[culture.git] / sky.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, tqdm, os

import torch, torchvision

from torch import nn
from torch.nn import functional as F

######################################################################

import problem


class Sky(problem.Problem):
    # RGB palette: index 0 is the white background, indices 1-10 are bird colors
    colors = torch.tensor(
        [
            [255, 255, 255],
            [255, 0, 0],
            [0, 192, 0],
            [0, 0, 255],
            [255, 192, 0],
            [0, 255, 255],
            [255, 0, 255],
            [192, 255, 192],
            [255, 192, 192],
            [192, 192, 255],
            [192, 192, 192],
        ]
    )

    token_background = 0
    first_bird_token = 1
    nb_bird_tokens = colors.size(0) - 1

    # one character per token for text dumps: "_" for the background, "A", "B", ...
    # for the bird colors, and two extra characters for tokens beyond the palette
    token2char = (
        "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><"
    )

    def nb_token_values(self):
        return len(self.colors)

    def __init__(
        self,
        height=6,
        width=8,
        nb_birds=3,
        speed=2,
        nb_iterations=2,
        avoid_collision=True,
    ):
        self.height = height
        self.width = width
        self.nb_birds = nb_birds
        self.speed = speed
        self.nb_iterations = nb_iterations
        self.avoid_collision = avoid_collision

    def generate_frame_sequences(self, nb):
        frame_sequences = []

        for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
            i, j, vi, vj = (
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
                torch.empty(self.nb_birds, dtype=torch.int64),
            )

            def collision_okay():
                # A configuration is acceptable if no grid cell is covered by more
                # than one bird (a bird covers its head cell plus the cells one
                # step behind it vertically and horizontally)
                if not self.avoid_collision:
                    return True

                count = torch.zeros(self.height, self.width, dtype=torch.int64)

                for n in range(self.nb_birds):
                    count[i[n], j[n]] += 1
                    count[i[n] - vi[n], j[n]] += 1
                    count[i[n], j[n] - vj[n]] += 1

                return count.max() <= 1

            # pick distinct, sorted color tokens for the birds
            col = (
                torch.randperm(self.colors.size(0) - 1)[: self.nb_birds].sort().values
                + 1
            )

            while True:
                # rejection sampling: draw positions and diagonal velocities until
                # every bird fits in the grid and no two birds overlap
                while True:
                    for n in range(self.nb_birds):
                        while True:
                            i[n] = torch.randint(self.height, (1,))
                            j[n] = torch.randint(self.width, (1,))
                            vm = torch.randint(4, (1,))
                            vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
                            if (
                                i[n] - vi[n] >= 0
                                and i[n] - vi[n] < self.height
                                and j[n] - vj[n] >= 0
                                and j[n] - vj[n] < self.width
                            ):
                                break

                    if collision_okay():
                        break

                result = torch.zeros(
                    self.nb_iterations * self.speed,
                    self.height,
                    self.width,
                    dtype=torch.int64,
                )

                fine = torch.empty(self.nb_iterations * self.speed)

                # only every `speed`-th simulated frame is kept in the final sequence
                t_to_keep = (
                    torch.arange(self.nb_iterations, device=result.device) * self.speed
                )

                for l in range(self.nb_iterations * self.speed):
                    fine[l] = collision_okay()
                    for n in range(self.nb_birds):
                        c = col[n]
                        result[l, i[n], j[n]] = c
                        result[l, i[n] - vi[n], j[n]] = c
                        result[l, i[n], j[n] - vj[n]] = c

                        # bounce off the top/bottom and left/right borders
                        if (i[n] == 0 and vi[n] == -1) or (
                            i[n] == self.height - 1 and vi[n] == 1
                        ):
                            vi[n] = -vi[n]

                        if (j[n] == 0 and vj[n] == -1) or (
                            j[n] == self.width - 1 and vj[n] == 1
                        ):
                            vj[n] = -vj[n]

                        i[n] += vi[n]
                        j[n] += vj[n]

                result = result[t_to_keep]
                fine = fine[t_to_keep]

                # keep the sequence only if its last retained frame is collision-free,
                # otherwise re-draw the whole configuration
                if fine[-1]:
                    break

            frame_sequences.append(result)

        return frame_sequences

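    # Minimal usage sketch (illustrative, not part of the original file): each
    # element of the returned list is a LongTensor of shape
    # (nb_iterations, height, width) holding color-token indices.
    #
    #   sky = Sky(height=6, width=8, nb_birds=3, speed=2, nb_iterations=2)
    #   seqs = sky.generate_frame_sequences(5)
    #   assert len(seqs) == 5 and seqs[0].shape == (2, 6, 8)
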
    ######################################################################

    def generate_prompts_and_answers(self, nb):
        frame_sequences = self.generate_frame_sequences(nb)
        frame_sequences = torch.cat([x[None] for x in frame_sequences], dim=0)
        prompts = frame_sequences[:, : frame_sequences.size(1) // 2].flatten(1)
        answers = frame_sequences[:, frame_sequences.size(1) // 2 :].flatten(1)
        return prompts, answers

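    # Shape sketch (illustrative): the first half of each kept sequence becomes
    # the prompt, the second half the answer, both flattened per sample. With the
    # defaults (nb_iterations=2, height=6, width=8):
    #
    #   prompts, answers = sky.generate_prompts_and_answers(4)
    #   # prompts.shape == (4, 48) and answers.shape == (4, 48)
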
    ######################################################################

    def frame2img(self, x, scale=15):
        x = x.reshape(-1, self.height, self.width)
        # mask of cells holding a valid token (background or bird); out-of-range
        # tokens are mapped to the background color and crossed out below
        m = torch.logical_and(
            x >= 0, x < self.first_bird_token + self.nb_bird_tokens
        ).long()
        x = self.colors[x * m].permute(0, 3, 1, 2)
        s = x.shape
        # upscale each cell to a scale x scale block and draw black grid lines
        x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
        x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)

        x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
        x[:, :, torch.arange(0, x.size(2), scale), :] = 0
        x = x[:, :, 1:, 1:]

        # draw a black diagonal cross over cells whose token is out of range
        for n in range(m.size(0)):
            for i in range(m.size(1)):
                for j in range(m.size(2)):
                    if m[n, i, j] == 0:
                        for k in range(2, scale - 2):
                            for l in [0, 1]:
                                x[n, :, i * scale + k, j * scale + k - l] = 0
                                x[
                                    n, :, i * scale + scale - 1 - k, j * scale + k - l
                                ] = 0

        return x

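    # Usage sketch (illustrative): the output is an integer RGB batch of shape
    # (N, 3, height * scale - 1, width * scale - 1), directly usable with
    # torchvision after scaling to [0, 1].
    #
    #   img = sky.frame2img(prompts.to("cpu"))
    #   torchvision.utils.save_image(img.float() / 255.0, "/tmp/frames.png", nrow=4)
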
    def seq2str(self, seq):
        result = []
        for s in seq:
            result.append("".join([self.token2char[v] for v in s]))
        return result

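    # Text-dump sketch (illustrative): each token sequence becomes a string,
    # "_" for background cells and letters for the birds.
    #
    #   print(sky.seq2str(prompts[:2]))
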
    def save_image(
        self,
        result_dir,
        filename,
        prompts,
        answers,
        predicted_prompts=None,
        predicted_answers=None,
    ):
        if predicted_prompts is None:
            predicted_prompts = 255

        if predicted_answers is None:
            predicted_answers = 255

        def add_frame(x, c, margin):
            # surround every image of the batch with a margin whose color is
            # either the constant c or, per sample, gray (c == 1) / white (c == 0)
            y = x.new_full(
                (x.size(0), x.size(1), x.size(2) + 2 * margin, x.size(3) + 2 * margin),
                0,
            )
            if type(c) is int:
                y[...] = c
            else:
                c = c.long()[:, None]
                c = c * torch.tensor([192, 192, 192], device=c.device) + (
                    1 - c
                ) * torch.tensor([255, 255, 255], device=c.device)
                y[...] = c[:, :, None, None]
            y[:, :, margin:-margin, margin:-margin] = x
            return y

        margin = 4

        img_prompts = add_frame(self.frame2img(prompts.to("cpu")), 0, 1)
        img_answers = add_frame(self.frame2img(answers.to("cpu")), 0, 1)

        # img_prompts = add_frame(img_prompts, 255, margin)
        # img_answers = add_frame(img_answers, 255, margin)

        img_prompts = add_frame(img_prompts, predicted_prompts, margin)
        img_answers = add_frame(img_answers, predicted_answers, margin)

        # note: this separator is built but not inserted in the concatenation below
        separator = img_prompts.new_full(
            (img_prompts.size(0), img_prompts.size(1), img_prompts.size(2), margin), 255
        )

        img = torch.cat([img_prompts, img_answers], dim=3)

        image_name = os.path.join(result_dir, filename)
        torchvision.utils.save_image(
            img.float() / 255.0, image_name, nrow=6, padding=margin * 2, pad_value=1.0
        )

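    # Usage sketch (illustrative assumption): the optional per-sample masks select
    # a gray frame (1) or a white frame (0) around the prompt and answer halves.
    #
    #   ok = torch.tensor([1, 0, 1, 0]).bool()
    #   sky.save_image("/tmp", "demo.png", prompts, answers, ok, ok)
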
    def save_quizzes(
        self,
        result_dir,
        filename_prefix,
        prompts,
        answers,
        predicted_prompts=None,
        predicted_answers=None,
    ):
        self.save_image(
            result_dir,
            filename_prefix + ".png",
            prompts,
            answers,
            predicted_prompts,
            predicted_answers,
        )


######################################################################

if __name__ == "__main__":
    import time

    sky = Sky(height=6, width=8, speed=4, nb_iterations=2)

    prompts, answers = sky.generate_prompts_and_answers(4)

    predicted_prompts = torch.rand(prompts.size(0)) < 0.5
    predicted_answers = torch.rand(answers.size(0)) < 0.5

    sky.save_quizzes(
        "/tmp", "test", prompts, answers, predicted_prompts, predicted_answers
    )

    # start_time = time.perf_counter()
    # token_sequences = sky.generate_token_sequences(nb=64)
    # delay = time.perf_counter() - start_time
    # print(f"{token_sequences.size(0)/delay:02f} seq/s")

    # print(sky.seq2str(seq[:4]))

    # for t in range(len(it[0])):
    #     img = torch.cat([sky.frame2img(f[t]) for f in it], dim=0)
    #     torchvision.utils.save_image(
    #         img.float() / 255.0,
    #         f"/tmp/frame_{t:03d}.png",
    #         nrow=8,
    #         padding=6,
    #         pad_value=0,
    #     )

    # m = (torch.rand(seq.size()) < 0.05).long()
    # seq = (1 - m) * seq + m * 23

    # print(seq.size())
    # img = sky.seq2img(token_sequences)
    # print(img.size())

    # torchvision.utils.save_image(
    #     img.float() / 255.0, "/tmp/world.png", nrow=6, padding=6, pad_value=0
    # )