3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
8 import math, sys, tqdm, os, warnings
10 import torch, torchvision
13 from torch.nn import functional as F
15 ######################################################################
20 class Grids(problem.Problem):
22 ("white", [255, 255, 255]),
24 ("green", [0, 192, 0]),
25 ("blue", [0, 0, 255]),
26 ("yellow", [255, 224, 0]),
27 ("cyan", [0, 255, 255]),
28 ("violet", [224, 128, 255]),
29 ("lightgreen", [192, 255, 192]),
30 ("brown", [165, 42, 42]),
31 ("lightblue", [192, 192, 255]),
32 ("gray", [128, 128, 128]),
35 def __init__(self, device=torch.device("cpu")):
36 self.colors = torch.tensor([c for _, c in self.named_colors])
41 ######################################################################
43 def frame2img(self, x, scale=15):
44 x = x.reshape(x.size(0), self.height, -1)
45 m = torch.logical_and(x >= 0, x < self.nb_token_values()).long()
46 x = self.colors[x * m].permute(0, 3, 1, 2)
48 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
49 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
51 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
52 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
55 for n in range(m.size(0)):
56 for i in range(m.size(1)):
57 for j in range(m.size(2)):
59 for k in range(2, scale - 2):
61 x[n, :, i * scale + k, j * scale + k - l] = 0
63 n, :, i * scale + scale - 1 - k, j * scale + k - l
68 def frame2img_(self, x, scale=15):
69 x = x.reshape(x.size(0), self.height, -1)
70 x = self.colors[x].permute(0, 3, 1, 2)
72 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
73 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
75 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
76 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
87 predicted_prompts=None,
88 predicted_answers=None,
91 S = self.height * self.width
92 As = prompts[:, 0 * (S + 1) : 0 * (S + 1) + S].view(-1, self.height, self.width)
93 f_As = prompts[:, 1 * (S + 1) : 1 * (S + 1) + S].view(
94 -1, self.height, self.width
96 Bs = prompts[:, 2 * (S + 1) : 2 * (S + 1) + S].view(-1, self.height, self.width)
97 prompts = torch.cat([As, f_As, Bs], dim=2)
98 answers = answers.reshape(answers.size(0), self.height, self.width)
100 if predicted_prompts is None:
101 predicted_prompts = 255
103 if predicted_answers is None:
104 predicted_answers = 255
106 def add_frame(x, c, margin, bottom=False):
108 h, w, di, dj = x.size(2) + margin, x.size(3), 0, 0
111 x.size(2) + 2 * margin,
112 x.size(3) + 2 * margin,
117 y = x.new_full((x.size(0), x.size(1), h, w), 0)
122 c = c.long()[:, None]
124 (1 - ((c == 1).long() + (c == 0).long() + (c == -1).long()))
125 * torch.tensor([64, 64, 64], device=c.device)
126 + (c == 1).long() * torch.tensor([0, 255, 0], device=c.device)
127 + (c == 0).long() * torch.tensor([255, 255, 255], device=c.device)
128 + (c == -1).long() * torch.tensor([255, 0, 0], device=c.device)
130 y[...] = c[:, :, None, None]
132 y[:, :, di : di + x.size(2), dj : dj + x.size(3)] = x
138 img_prompts = torch.cat(
141 add_frame(self.frame2img(x), c=0, margin=1),
145 for x in prompts.to("cpu").split(split_size=self.width, dim=2)
150 h = img_prompts.size(2)
151 img_answers = add_frame(
152 add_frame(self.frame2img(answers.to("cpu")), c=0, margin=1),
157 separator_size = 2 * margin
159 separator = img_prompts.new_full(
169 marker = img_prompts.new_full(
179 # marker[:, :, 0] = 0
180 # marker[:, :, h - 1] = 0
182 for k in range(1, 2 * separator_size - 8):
183 i = k - (separator_size - 4)
184 j = separator_size - 5 - abs(i)
185 marker[:, :, h // 2 - 1 + i, 2 + j] = 0
186 marker[:, :, h // 2 - 1 + i + 1, 2 + j] = 0
197 image_name = os.path.join(result_dir, filename)
198 torchvision.utils.save_image(
206 ######################################################################
def nb_token_values(self):
    """Return the size of the token vocabulary, i.e. the number of
    entries in the color palette (one token value per color)."""
    # self.colors is the (num_colors, 3) RGB tensor built in __init__,
    # so its first dimension is the token count.
    return self.colors.shape[0]
211 # That's quite a tensorial spaghetti mess to sample
212 # non-overlapping rectangles quickly, but it brought the generation of
213 # 100k samples down from 1h50 (naive pure-Python code) to 3min30s
215 def rec_coo(self, nb_rec, min_height=3, min_width=3):
221 torch.rand(nb_trials * nb_rec, self.height + 1, device=self.device)
233 torch.rand(nb_trials * nb_rec, self.width + 1, device=self.device)
243 i = torch.logical_and(
244 v.sum(dim=-1) >= min_height, h.sum(dim=-1) >= min_width
248 v = v[: v.size(0) - v.size(0) % nb_rec]
249 h = h[: h.size(0) - h.size(0) % nb_rec]
250 v = v.reshape(v.size(0) // nb_rec, nb_rec, -1)
251 h = h.reshape(h.size(0) // nb_rec, nb_rec, -1)
253 r = v[:, :, :, None] * h[:, :, None, :]
255 valid = r.sum(dim=1).flatten(1).max(dim=-1).values == 1
263 av = torch.arange(v.size(2), device=self.device)[None, :]
264 ah = torch.arange(h.size(2), device=self.device)[None, :]
267 (i1.item(), j1.item(), i2.item() + 1, j2.item() + 1)
268 for i1, j1, i2, j2 in zip(
269 v.size(2) - (v[0] * (v.size(2) - av)).max(dim=-1).values,
270 h.size(2) - (h[0] * (h.size(2) - ah)).max(dim=-1).values,
271 (v[0] * av).max(dim=-1).values,
272 (h[0] * ah).max(dim=-1).values,
276 def rec_coo_(self, x, n, min_height=3, min_width=3):
277 collision = x.new(x.size())
283 i1, i2 = torch.randint(x.size(0), (2,))
284 if i1 + min_height <= i2:
287 j1, j2 = torch.randint(x.size(1), (2,))
288 if j1 + min_width <= j2:
290 collision[i1:i2, j1:j2] += 1
291 if collision.max() > 1:
293 result.append((i1, j1, i2, j2))
294 if collision.max() == 1:
298 ######################################################################
300 def task_replace_color(self, A, f_A, B, f_B):
302 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
303 for X, f_X in [(A, f_A), (B, f_B)]:
304 r = self.rec_coo(nb_rec)
305 for n in range(nb_rec):
306 i1, j1, i2, j2 = r[n]
307 X[i1:i2, j1:j2] = c[n]
308 f_X[i1:i2, j1:j2] = c[n if n > 0 else -1]
310 def task_translate(self, A, f_A, B, f_B):
311 di, dj = torch.randint(3, (2,)) - 1
313 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
314 for X, f_X in [(A, f_A), (B, f_B)]:
316 r = self.rec_coo(nb_rec)
317 i1, j1, i2, j2 = r[nb_rec - 1]
320 and i2 + di < X.size(0)
322 and j2 + dj < X.size(1)
326 for n in range(nb_rec):
327 i1, j1, i2, j2 = r[n]
328 X[i1:i2, j1:j2] = c[n]
330 f_X[i1 + di : i2 + di, j1 + dj : j2 + dj] = c[n]
332 f_X[i1:i2, j1:j2] = c[n]
334 def task_grow(self, A, f_A, B, f_B):
335 di, dj = torch.randint(2, (2,)) * 2 - 1
337 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
338 direction = torch.randint(2, (1,))
339 for X, f_X in [(A, f_A), (B, f_B)]:
341 r = self.rec_coo(nb_rec)
342 i1, j1, i2, j2 = r[nb_rec - 1]
343 if i1 + 3 < i2 and j1 + 3 < j2:
346 for n in range(nb_rec):
347 i1, j1, i2, j2 = r[n]
350 X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
351 f_X[i1:i2, j1:j2] = c[n]
353 X[i1:i2, j1:j2] = c[n]
354 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
356 X[i1:i2, j1:j2] = c[n]
357 f_X[i1:i2, j1:j2] = c[n]
359 def task_color_grow(self, A, f_A, B, f_B):
360 di, dj = torch.randint(2, (2,)) * 2 - 1
362 c = torch.randperm(len(self.colors) - 1)[: 2 * nb_rec] + 1
363 direction = torch.randint(4, (1,))
364 for X, f_X in [(A, f_A), (B, f_B)]:
365 r = self.rec_coo(nb_rec)
366 for n in range(nb_rec):
367 i1, j1, i2, j2 = r[n]
368 X[i1:i2, j1:j2] = c[2 * n]
369 f_X[i1:i2, j1:j2] = c[2 * n]
370 # Not my proudest moment
373 X[i : i + 1, j1:j2] = c[2 * n + 1]
375 f_X[i:i2, j1:j2] = c[2 * n + 1]
377 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
379 i = (i1 + i2 - 1) // 2
380 X[i : i + 1, j1:j2] = c[2 * n + 1]
382 f_X[i1 : i + 1, j1:j2] = c[2 * n + 1]
384 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
387 X[i1:i2, j : j + 1] = c[2 * n + 1]
389 f_X[i1:i2, j:j2] = c[2 * n + 1]
391 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
393 j = (j1 + j2 - 1) // 2
394 X[i1:i2, j : j + 1] = c[2 * n + 1]
396 f_X[i1:i2, j1 : j + 1] = c[2 * n + 1]
398 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
400 def task_frame(self, A, f_A, B, f_B):
402 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
403 for X, f_X in [(A, f_A), (B, f_B)]:
404 r = self.rec_coo(nb_rec)
405 for n in range(nb_rec):
406 i1, j1, i2, j2 = r[n]
407 X[i1:i2, j1:j2] = c[n]
408 f_X[i1:i2, j1:j2] = c[n]
410 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = 0
412 def task_detect(self, A, f_A, B, f_B):
414 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
415 for X, f_X in [(A, f_A), (B, f_B)]:
416 r = self.rec_coo(nb_rec)
417 for n in range(nb_rec):
418 i1, j1, i2, j2 = r[n]
419 X[i1:i2, j1:j2] = c[n]
423 def contact(self, X, i, j, q):
437 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
438 if X[ii, jj] != 0 and X[ii, jj] != q:
447 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
448 if X[ii, jj] == q and X[i, jj] != q and X[ii, j] != q:
451 for ii, jj in [(i - 1, j), (i, j - 1), (i, j + 1), (i + 1, j)]:
452 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
456 return no, nq, nq_diag
458 def task_count(self, A, f_A, B, f_B):
459 N = torch.randint(4, (1,)) + 2
460 c = torch.randperm(len(self.colors) - 1)[:N] + 1
462 for X, f_X in [(A, f_A), (B, f_B)]:
463 nb = torch.zeros(N, dtype=torch.int64)
464 q = torch.randint(N, (self.height * self.width,))
465 k = torch.randperm(self.height * self.width)
466 for p in range(self.height * self.width):
467 i, j = k[p] % self.height, k[p] // self.height
468 no, nq, nq_diag = self.contact(X, i, j, c[q[p]])
469 if no == 0 and nq_diag == 0:
471 if nb[q[p]] < self.width:
478 for j in range(nb[n]):
481 def task_trajectory(self, A, f_A, B, f_B):
482 c = torch.randperm(len(self.colors) - 1)[:2] + 1
483 for X, f_X in [(A, f_A), (B, f_B)]:
485 di, dj = torch.randint(7, (2,)) - 3
486 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
488 abs(di) + abs(dj) > 0
490 and i + 2 * di < self.height
492 and j + 2 * dj < self.width
499 and i + k * di < self.height
501 and j + k * dj < self.width
504 X[i + k * di, j + k * dj] = c[k]
505 f_X[i + k * di, j + k * dj] = c[min(k, 1)]
508 def task_bounce(self, A, f_A, B, f_B):
509 c = torch.randperm(len(self.colors) - 1)[:3] + 1
510 for X, f_X in [(A, f_A), (B, f_B)]:
525 for _ in range((self.height * self.width) // 10):
526 i, j = torch.randint(self.height, (1,)), torch.randint(
533 di, dj = torch.randint(7, (2,)) - 3
534 if abs(di) + abs(dj) == 1:
537 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
545 if free(i + di, j + dj):
547 elif free(i - dj, j + di):
549 if free(i + dj, j - di):
550 if torch.rand(1) < 0.5:
552 elif free(i + dj, j - di):
557 i, j = i + di, j + dj
571 def task_scale(self, A, f_A, B, f_B):
572 c = torch.randperm(len(self.colors) - 1)[:2] + 1
574 i, j = torch.randint(self.height // 2, (1,)), torch.randint(
575 self.width // 2, (1,)
578 for X, f_X in [(A, f_A), (B, f_B)]:
581 i1, j1 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
582 self.width // 2 + 1, (1,)
584 i2, j2 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
585 self.width // 2 + 1, (1,)
587 if i1 < i2 and j1 < j2 and min(i2 - i1, j2 - j1) <= 3:
589 X[i + i1 : i + i2, j + j1 : j + j2] = c[0]
590 f_X[2 * i1 : 2 * i2, 2 * j1 : 2 * j2] = c[0]
595 def task_islands(self, A, f_A, B, f_B):
596 for X, f_X in [(A, f_A), (B, f_B)]:
598 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
601 or i == self.height - 1
603 or j == self.width - 1
608 di, dj = torch.randint(3, (2,)) - 1
609 if abs(di) + abs(dj) > 0:
613 i, j = i + di, j + dj
614 if i < 0 or i >= self.height or j < 0 or j >= self.width:
618 or i == self.height - 1
620 or j == self.width - 1
627 ######################################################################
631 self.task_replace_color,
634 self.task_color_grow,
638 self.task_trajectory,
644 def generate_prompts_and_answers(self, nb, tasks=None, device="cpu"):
646 tasks = self.all_tasks()
648 S = self.height * self.width
649 prompts = torch.zeros(nb, 3 * S + 2, dtype=torch.int64)
650 answers = torch.zeros(nb, S, dtype=torch.int64)
652 for prompt, answer in tqdm.tqdm(
653 zip(prompts, answers),
655 desc="world generation",
656 total=prompts.size(0),
658 A = prompt[0 * (S + 1) : 0 * (S + 1) + S].view(self.height, self.width)
659 f_A = prompt[1 * (S + 1) : 1 * (S + 1) + S].view(self.height, self.width)
660 B = prompt[2 * (S + 1) : 2 * (S + 1) + S].view(self.height, self.width)
661 f_B = answer.view(self.height, self.width)
662 task = tasks[torch.randint(len(tasks), (1,))]
665 return prompts.flatten(1), answers.flatten(1)
673 predicted_prompts=None,
674 predicted_answers=None,
679 filename_prefix + ".png",
688 ######################################################################
690 if __name__ == "__main__":
697 for t in grids.all_tasks():
698 # for t in [grids.task_islands]:
700 prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t])
701 grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=4)
707 start_time = time.perf_counter()
708 prompts, answers = grids.generate_prompts_and_answers(nb)
709 delay = time.perf_counter() - start_time
710 print(f"{prompts.size(0)/delay:02f} seq/s")
712 m = torch.randint(2, (prompts.size(0),))
713 predicted_prompts = m * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
714 predicted_answers = (1 - m) * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
721 # You can add a bool to put a frame around the predicted parts
722 predicted_prompts[:nb],
723 predicted_answers[:nb],