3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
8 import math, sys, tqdm, os, warnings
10 import torch, torchvision
13 from torch.nn import functional as F
15 ######################################################################
20 class Grids(problem.Problem):
22 ("white", [255, 255, 255]),
24 ("green", [0, 192, 0]),
25 ("blue", [0, 0, 255]),
26 ("orange", [255, 192, 0]),
27 ("cyan", [0, 255, 255]),
28 ("violet", [255, 0, 255]),
29 ("lightgreen", [192, 255, 192]),
30 ("brown", [165, 42, 42]),
31 ("lightblue", [192, 192, 255]),
32 ("gray", [128, 128, 128]),
35 def __init__(self, device=torch.device("cpu")):
36 self.colors = torch.tensor([c for _, c in self.named_colors])
37 self.name2color = dict([(p[0], i) for i, p in enumerate(self.named_colors)])
42 ######################################################################
44 def frame2img(self, x, scale=15):
45 x = x.reshape(x.size(0), self.height, -1)
46 m = torch.logical_and(x >= 0, x < self.nb_token_values()).long()
47 x = self.colors[x * m].permute(0, 3, 1, 2)
49 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
50 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
52 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
53 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
56 for n in range(m.size(0)):
57 for i in range(m.size(1)):
58 for j in range(m.size(2)):
60 for k in range(2, scale - 2):
62 x[n, :, i * scale + k, j * scale + k - l] = 0
64 n, :, i * scale + scale - 1 - k, j * scale + k - l
69 def frame2img_(self, x, scale=15):
70 x = x.reshape(x.size(0), self.height, -1)
71 x = self.colors[x].permute(0, 3, 1, 2)
73 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
74 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
76 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
77 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
88 predicted_prompts=None,
89 predicted_answers=None,
92 S = self.height * self.width
93 As = prompts[:, 0 * (S + 1) : 0 * (S + 1) + S].view(-1, self.height, self.width)
94 f_As = prompts[:, 1 * (S + 1) : 1 * (S + 1) + S].view(
95 -1, self.height, self.width
97 Bs = prompts[:, 2 * (S + 1) : 2 * (S + 1) + S].view(-1, self.height, self.width)
98 prompts = torch.cat([As, f_As, Bs], dim=2)
99 answers = answers.reshape(answers.size(0), self.height, self.width)
101 if predicted_prompts is None:
102 predicted_prompts = 255
104 if predicted_answers is None:
105 predicted_answers = 255
107 def add_frame(x, c, margin, bottom=False):
109 h, w, di, dj = x.size(2) + margin, x.size(3), 0, 0
112 x.size(2) + 2 * margin,
113 x.size(3) + 2 * margin,
118 y = x.new_full((x.size(0), x.size(1), h, w), 0)
123 c = c.long()[:, None]
125 (1 - ((c == 1).long() + (c == 0).long() + (c == -1).long()))
126 * torch.tensor([64, 64, 64], device=c.device)
127 + (c == 1).long() * torch.tensor([0, 255, 0], device=c.device)
128 + (c == 0).long() * torch.tensor([255, 255, 255], device=c.device)
129 + (c == -1).long() * torch.tensor([255, 0, 0], device=c.device)
131 y[...] = c[:, :, None, None]
133 y[:, :, di : di + x.size(2), dj : dj + x.size(3)] = x
139 img_prompts = torch.cat(
142 add_frame(self.frame2img(x), c=0, margin=1),
146 for x in prompts.to("cpu").split(split_size=self.width, dim=2)
151 h = img_prompts.size(2)
152 img_answers = add_frame(
153 add_frame(self.frame2img(answers.to("cpu")), c=0, margin=1),
158 separator_size = 2 * margin
160 separator = img_prompts.new_full(
170 marker = img_prompts.new_full(
180 # marker[:, :, 0] = 0
181 # marker[:, :, h - 1] = 0
183 for k in range(1, 2 * separator_size - 8):
184 i = k - (separator_size - 4)
185 j = separator_size - 5 - abs(i)
186 marker[:, :, h // 2 - 1 + i, 2 + j] = 0
187 marker[:, :, h // 2 - 1 + i + 1, 2 + j] = 0
198 image_name = os.path.join(result_dir, filename)
199 torchvision.utils.save_image(
207 ######################################################################
209 def nb_token_values(self):
210 return len(self.colors)
212 # That's quite a tensorial spaghetti mess to sample
213 # non-overlapping rectangles quickly, but it made the generation of
214 # 100k samples go from 1h50 with lame pure Python code to 3min30s
216 def rec_coo(self, nb_rec, min_height=3, min_width=3):
222 torch.rand(nb_trials * nb_rec, self.height + 1, device=self.device)
234 torch.rand(nb_trials * nb_rec, self.width + 1, device=self.device)
244 i = torch.logical_and(
245 v.sum(dim=-1) >= min_height, h.sum(dim=-1) >= min_width
249 v = v[: v.size(0) - v.size(0) % nb_rec]
250 h = h[: h.size(0) - h.size(0) % nb_rec]
251 v = v.reshape(v.size(0) // nb_rec, nb_rec, -1)
252 h = h.reshape(h.size(0) // nb_rec, nb_rec, -1)
254 r = v[:, :, :, None] * h[:, :, None, :]
256 valid = r.sum(dim=1).flatten(1).max(dim=-1).values == 1
264 av = torch.arange(v.size(2), device=self.device)[None, :]
265 ah = torch.arange(h.size(2), device=self.device)[None, :]
268 (i1.item(), j1.item(), i2.item() + 1, j2.item() + 1)
269 for i1, j1, i2, j2 in zip(
270 v.size(2) - (v[0] * (v.size(2) - av)).max(dim=-1).values,
271 h.size(2) - (h[0] * (h.size(2) - ah)).max(dim=-1).values,
272 (v[0] * av).max(dim=-1).values,
273 (h[0] * ah).max(dim=-1).values,
277 def rec_coo_(self, x, n, min_height=3, min_width=3):
278 collision = x.new(x.size())
284 i1, i2 = torch.randint(x.size(0), (2,))
285 if i1 + min_height <= i2:
288 j1, j2 = torch.randint(x.size(1), (2,))
289 if j1 + min_width <= j2:
291 collision[i1:i2, j1:j2] += 1
292 if collision.max() > 1:
294 result.append((i1, j1, i2, j2))
295 if collision.max() == 1:
299 ######################################################################
301 def task_replace_color(self, A, f_A, B, f_B):
303 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
304 for X, f_X in [(A, f_A), (B, f_B)]:
305 r = self.rec_coo(nb_rec)
306 for n in range(nb_rec):
307 i1, j1, i2, j2 = r[n]
308 X[i1:i2, j1:j2] = c[n]
309 f_X[i1:i2, j1:j2] = c[n if n > 0 else -1]
311 def task_translate(self, A, f_A, B, f_B):
312 di, dj = torch.randint(3, (2,)) - 1
314 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
315 for X, f_X in [(A, f_A), (B, f_B)]:
317 r = self.rec_coo(nb_rec)
318 i1, j1, i2, j2 = r[nb_rec - 1]
321 and i2 + di < X.size(0)
323 and j2 + dj < X.size(1)
327 for n in range(nb_rec):
328 i1, j1, i2, j2 = r[n]
329 X[i1:i2, j1:j2] = c[n]
331 f_X[i1 + di : i2 + di, j1 + dj : j2 + dj] = c[n]
333 f_X[i1:i2, j1:j2] = c[n]
335 def task_grow(self, A, f_A, B, f_B):
336 di, dj = torch.randint(2, (2,)) * 2 - 1
338 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
339 direction = torch.randint(2, (1,))
340 for X, f_X in [(A, f_A), (B, f_B)]:
342 r = self.rec_coo(nb_rec)
343 i1, j1, i2, j2 = r[nb_rec - 1]
344 if i1 + 3 < i2 and j1 + 3 < j2:
347 for n in range(nb_rec):
348 i1, j1, i2, j2 = r[n]
351 X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
352 f_X[i1:i2, j1:j2] = c[n]
354 X[i1:i2, j1:j2] = c[n]
355 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
357 X[i1:i2, j1:j2] = c[n]
358 f_X[i1:i2, j1:j2] = c[n]
360 def task_color_grow(self, A, f_A, B, f_B):
361 di, dj = torch.randint(2, (2,)) * 2 - 1
363 c = torch.randperm(len(self.colors) - 1)[: 2 * nb_rec] + 1
364 direction = torch.randint(4, (1,))
365 for X, f_X in [(A, f_A), (B, f_B)]:
366 r = self.rec_coo(nb_rec)
367 for n in range(nb_rec):
368 i1, j1, i2, j2 = r[n]
369 X[i1:i2, j1:j2] = c[2 * n]
370 f_X[i1:i2, j1:j2] = c[2 * n]
371 # Not my proudest moment
374 X[i : i + 1, j1:j2] = c[2 * n + 1]
376 f_X[i:i2, j1:j2] = c[2 * n + 1]
378 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
380 i = (i1 + i2 - 1) // 2
381 X[i : i + 1, j1:j2] = c[2 * n + 1]
383 f_X[i1 : i + 1, j1:j2] = c[2 * n + 1]
385 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
388 X[i1:i2, j : j + 1] = c[2 * n + 1]
390 f_X[i1:i2, j:j2] = c[2 * n + 1]
392 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
394 j = (j1 + j2 - 1) // 2
395 X[i1:i2, j : j + 1] = c[2 * n + 1]
397 f_X[i1:i2, j1 : j + 1] = c[2 * n + 1]
399 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
401 def task_frame(self, A, f_A, B, f_B):
403 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
404 for X, f_X in [(A, f_A), (B, f_B)]:
405 r = self.rec_coo(nb_rec)
406 for n in range(nb_rec):
407 i1, j1, i2, j2 = r[n]
408 X[i1:i2, j1:j2] = c[n]
409 f_X[i1:i2, j1:j2] = c[n]
411 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = 0
413 def task_detect(self, A, f_A, B, f_B):
415 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
416 for X, f_X in [(A, f_A), (B, f_B)]:
417 r = self.rec_coo(nb_rec)
418 for n in range(nb_rec):
419 i1, j1, i2, j2 = r[n]
420 X[i1:i2, j1:j2] = c[n]
424 def contact(self, X, i, j, q):
438 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
439 if X[ii, jj] != 0 and X[ii, jj] != q:
448 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
449 if X[ii, jj] == q and X[i, jj] != q and X[ii, j] != q:
452 for ii, jj in [(i - 1, j), (i, j - 1), (i, j + 1), (i + 1, j)]:
453 if ii >= 0 and ii < self.height and jj >= 0 and jj < self.width:
457 return no, nq, nq_diag
459 def task_count(self, A, f_A, B, f_B):
460 N = torch.randint(4, (1,)) + 2
461 c = torch.randperm(len(self.colors) - 1)[:N] + 1
463 for X, f_X in [(A, f_A), (B, f_B)]:
464 nb = torch.zeros(N, dtype=torch.int64)
465 q = torch.randint(N, (self.height * self.width,))
466 k = torch.randperm(self.height * self.width)
467 for p in range(self.height * self.width):
468 i, j = k[p] % self.height, k[p] // self.height
469 no, nq, nq_diag = self.contact(X, i, j, c[q[p]])
470 if no == 0 and nq_diag == 0:
472 if nb[q[p]] < self.width:
479 for j in range(nb[n]):
482 def task_trajectory(self, A, f_A, B, f_B):
483 c = torch.randperm(len(self.colors) - 1)[:2] + 1
484 for X, f_X in [(A, f_A), (B, f_B)]:
486 di, dj = torch.randint(7, (2,)) - 3
487 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
489 abs(di) + abs(dj) > 0
491 and i + 2 * di < self.height
493 and j + 2 * dj < self.width
500 and i + k * di < self.height
502 and j + k * dj < self.width
505 X[i + k * di, j + k * dj] = c[k]
506 f_X[i + k * di, j + k * dj] = c[min(k, 1)]
509 def task_bounce(self, A, f_A, B, f_B):
510 c = torch.randperm(len(self.colors) - 1)[:3] + 1
511 for X, f_X in [(A, f_A), (B, f_B)]:
526 for _ in range((self.height * self.width) // 10):
527 i, j = torch.randint(self.height, (1,)), torch.randint(
534 di, dj = torch.randint(7, (2,)) - 3
535 if abs(di) + abs(dj) == 1:
538 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
546 if free(i + di, j + dj):
548 elif free(i - dj, j + di):
550 if free(i + dj, j - di):
551 if torch.rand(1) < 0.5:
553 elif free(i + dj, j - di):
558 i, j = i + di, j + dj
572 def task_scale(self, A, f_A, B, f_B):
573 c = torch.randperm(len(self.colors) - 1)[:2] + 1
575 i, j = torch.randint(self.height // 2, (1,)), torch.randint(
576 self.width // 2, (1,)
579 for X, f_X in [(A, f_A), (B, f_B)]:
582 i1, j1 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
583 self.width // 2 + 1, (1,)
585 i2, j2 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
586 self.width // 2 + 1, (1,)
588 if i1 < i2 and j1 < j2 and min(i2 - i1, j2 - j1) <= 3:
590 X[i + i1 : i + i2, j + j1 : j + j2] = c[0]
591 f_X[2 * i1 : 2 * i2, 2 * j1 : 2 * j2] = c[0]
596 def task_islands(self, A, f_A, B, f_B):
597 for X, f_X in [(A, f_A), (B, f_B)]:
599 i, j = torch.randint(self.height, (1,)), torch.randint(self.width, (1,))
602 or i == self.height - 1
604 or j == self.width - 1
609 di, dj = torch.randint(3, (2,)) - 1
610 if abs(di) + abs(dj) > 0:
614 i, j = i + di, j + dj
615 if i < 0 or i >= self.height or j < 0 or j >= self.width:
619 or i == self.height - 1
621 or j == self.width - 1
628 ######################################################################
632 self.task_replace_color,
635 self.task_color_grow,
639 self.task_trajectory,
645 def generate_prompts_and_answers(self, nb, tasks=None, device="cpu"):
647 tasks = self.all_tasks()
649 S = self.height * self.width
650 prompts = torch.zeros(nb, 3 * S + 2, dtype=torch.int64)
651 answers = torch.zeros(nb, S, dtype=torch.int64)
653 for prompt, answer in tqdm.tqdm(
654 zip(prompts, answers),
656 desc="world generation",
657 total=prompts.size(0),
659 A = prompt[0 * (S + 1) : 0 * (S + 1) + S].view(self.height, self.width)
660 f_A = prompt[1 * (S + 1) : 1 * (S + 1) + S].view(self.height, self.width)
661 B = prompt[2 * (S + 1) : 2 * (S + 1) + S].view(self.height, self.width)
662 f_B = answer.view(self.height, self.width)
663 task = tasks[torch.randint(len(tasks), (1,))]
666 return prompts.flatten(1), answers.flatten(1)
674 predicted_prompts=None,
675 predicted_answers=None,
680 filename_prefix + ".png",
689 ######################################################################
691 if __name__ == "__main__":
698 for t in grids.all_tasks():
699 # for t in [grids.task_islands]:
701 prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t])
702 grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=4)
708 start_time = time.perf_counter()
709 prompts, answers = grids.generate_prompts_and_answers(nb)
710 delay = time.perf_counter() - start_time
711 print(f"{prompts.size(0)/delay:02f} seq/s")
713 m = torch.randint(2, (prompts.size(0),))
714 predicted_prompts = m * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
715 predicted_answers = (1 - m) * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
722 # You can add a bool to put a frame around the predicted parts
723 predicted_prompts[:nb],
724 predicted_answers[:nb],