3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
8 import math, sys, tqdm, os, warnings
10 import torch, torchvision
13 from torch.nn import functional as F
15 ######################################################################
20 class Reasoning(problem.Problem):
# NOTE(review): this listing is fragmentary — each line carries its original
# file line number and the numbering has gaps, so statements are missing from
# the middle of essentially every method below. Code is left byte-identical;
# only comments were added. Do not assume any method shown here is complete.
#
# Purpose (from what is visible): a quiz/"reasoning" problem generator that
# builds prompt/answer grids of color tokens, renders them to PNG images, and
# implements several rectangle-based tasks (replace-color, move, grow, ...).
#
# Fragment of a (name, RGB) color table — presumably a class-level
# `named_colors` list (its header and first entries, e.g. "black"/"red",
# fall in the missing lines 21/23 — TODO confirm against the full file).
22 ("white", [255, 255, 255]),
24 ("green", [0, 192, 0]),
25 ("blue", [0, 0, 255]),
26 ("orange", [255, 192, 0]),
27 ("cyan", [0, 255, 255]),
28 ("violet", [255, 0, 255]),
29 ("lightgreen", [192, 255, 192]),
30 ("brown", [165, 42, 42]),
31 ("lightblue", [192, 192, 255]),
32 ("gray", [128, 128, 128]),
# Constructor fragment: builds the color lookup tensor and a name -> index
# dict from `named_colors`. The `device` argument is taken but the line that
# stores it (self.device referenced later in rec_coo) is in a gap; likewise
# self.height / self.width assignments are not visible here — TODO confirm.
35 def __init__(self, device=torch.device("cpu")):
36 self.colors = torch.tensor([c for _, c in self.named_colors])
37 self.name2color = dict([(p[0], i) for i, p in enumerate(self.named_colors)])
42 ######################################################################
# frame2img fragment: maps a (N, height*width) grid of token indices to an
# RGB image upscaled by `scale`, masking out-of-range tokens to color 0 and
# drawing black grid lines every `scale` pixels.
44 def frame2img(self, x, scale=15):
45 x = x.reshape(x.size(0), self.height, -1)
# m == 1 where the token is a valid color index; invalid tokens are
# clamped to index 0 by the `x * m` below.
46 m = torch.logical_and(x >= 0, x < self.nb_token_values()).long()
47 x = self.colors[x * m].permute(0, 3, 1, 2)
# NOTE(review): `s` is used below but its assignment (presumably
# s = x.shape before the expand) sits in a missing line — confirm.
49 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
50 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
# Black 1-px grid lines at every cell boundary.
52 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
53 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
# Per-cell loop fragment: appears to draw a diagonal-cross marker on
# cells flagged by `m` (invalid tokens). `l` is not defined in this
# view — its assignment must be in a missing line; the second indexing
# expression at "64" is the truncated interior of another assignment.
56 for n in range(m.size(0)):
57 for i in range(m.size(1)):
58 for j in range(m.size(2)):
60 for k in range(2, scale - 2):
62 x[n, :, i * scale + k, j * scale + k - l] = 0
64 n, :, i * scale + scale - 1 - k, j * scale + k - l
# frame2img_ fragment: simpler renderer without the validity mask / markers.
# Same remark as above: `s` is assigned in a line missing from this view.
69 def frame2img_(self, x, scale=15):
70 x = x.reshape(x.size(0), self.height, -1)
71 x = self.colors[x].permute(0, 3, 1, 2)
73 x = x[:, :, :, None, :, None].expand(-1, -1, -1, scale, -1, scale)
74 x = x.reshape(s[0], s[1], s[2] * scale, s[3] * scale)
76 x[:, :, :, torch.arange(0, x.size(3), scale)] = 0
77 x[:, :, torch.arange(0, x.size(2), scale), :] = 0
# Fragment of an image-saving method (presumably save_quizzes /
# save_quiz_illustrations — its `def` line and the result_dir/filename/
# prompts/answers parameters are in missing lines). Renders prompts and
# answers side by side, tinting each quiz's background by whether it was
# "predicted" (gray 192) or not (white 255).
88 predicted_prompts=None,
89 predicted_answers=None,
91 prompts = prompts.reshape(prompts.size(0), self.height, -1)
92 answers = answers.reshape(answers.size(0), self.height, -1)
# 255 == plain white background when no prediction mask is supplied.
94 if predicted_prompts is None:
95 predicted_prompts = 255
97 if predicted_answers is None:
98 predicted_answers = 255
# Nested helper: pads image x with a margin and fills the border with
# color c (scalar, or a per-quiz bool tensor mapped to gray/white).
100 def add_frame(x, c, margin, bottom=False):
102 h, w, di, dj = x.size(2) + margin, x.size(3), 0, 0
105 x.size(2) + 2 * margin,
106 x.size(3) + 2 * margin,
111 y = x.new_full((x.size(0), x.size(1), h, w), 0)
# Tensor c: True -> RGB(192,192,192), False -> RGB(255,255,255).
116 c = c.long()[:, None]
117 c = c * torch.tensor([192, 192, 192], device=c.device) + (
119 ) * torch.tensor([255, 255, 255], device=c.device)
120 y[...] = c[:, :, None, None]
122 y[:, :, di : di + x.size(2), dj : dj + x.size(3)] = x
# The prompt is three width-wide frames (A, f_A, B) split and rendered
# side by side, each with a thin black frame.
128 img_prompts = torch.cat(
131 add_frame(self.frame2img(x), c=0, margin=1),
135 for x in prompts.to("cpu").split(split_size=self.width, dim=2)
140 h = img_prompts.size(2)
141 img_answers = add_frame(
142 add_frame(self.frame2img(answers.to("cpu")), c=0, margin=1),
147 separator_size = 2 * margin
149 separator = img_prompts.new_full(
159 marker = img_prompts.new_full(
169 # marker[:, :, 0] = 0
170 # marker[:, :, h - 1] = 0
# Draws a small ">" arrowhead (two 1-px-thick diagonals) centered
# vertically in the separator between prompts and answers.
172 for k in range(1, 2 * separator_size - 8):
173 i = k - (separator_size - 4)
174 j = separator_size - 5 - abs(i)
175 marker[:, :, h // 2 - 1 + i, 2 + j] = 0
176 marker[:, :, h // 2 - 1 + i + 1, 2 + j] = 0
# `img` (the final concatenation) is assembled in missing lines.
187 image_name = os.path.join(result_dir, filename)
188 torchvision.utils.save_image(
189 img.float() / 255.0, image_name, nrow=4, padding=margin * 4, pad_value=1.0
192 ######################################################################
# Number of distinct token values == number of colors (complete method).
194 def nb_token_values(self):
195 return len(self.colors)
197 # That's quite a tensorial spaghetti mess to sample
198 # non-overlapping rectangles quickly, but made the generation of
199 # 100k samples go from 1h50 with a lame pure python code to 3min30s
# rec_coo fragment: batched rejection sampling of nb_rec non-overlapping
# rectangles. v/h are per-trial vertical/horizontal indicator vectors; a
# trial is valid when no cell is covered by more than one rectangle
# (r.sum over rectangles has max 1). Returns a list of (i1, j1, i2, j2)
# with exclusive upper bounds. `nb_trials` is set in a missing line.
201 def rec_coo(self, nb_rec, min_height=3, min_width=3):
207 torch.rand(nb_trials * nb_rec, self.height + 1, device=self.device)
219 torch.rand(nb_trials * nb_rec, self.width + 1, device=self.device)
# Keep only candidates meeting the minimum extent in both axes.
229 i = torch.logical_and(
230 v.sum(dim=-1) >= min_height, h.sum(dim=-1) >= min_width
# Truncate so candidates group evenly into trials of nb_rec rectangles.
234 v = v[: v.size(0) - v.size(0) % nb_rec]
235 h = h[: h.size(0) - h.size(0) % nb_rec]
236 v = v.reshape(v.size(0) // nb_rec, nb_rec, -1)
237 h = h.reshape(h.size(0) // nb_rec, nb_rec, -1)
# Outer product: per-rectangle 2D occupancy grid.
239 r = v[:, :, :, None] * h[:, :, None, :]
241 valid = r.sum(dim=1).flatten(1).max(dim=-1).values == 1
# Recover the first/last set index of each indicator vector to get the
# rectangle bounds of the first valid trial (v[0] / h[0] after the
# filtering that happens in the missing lines — TODO confirm).
249 av = torch.arange(v.size(2), device=self.device)[None, :]
250 ah = torch.arange(h.size(2), device=self.device)[None, :]
253 (i1.item(), j1.item(), i2.item() + 1, j2.item() + 1)
254 for i1, j1, i2, j2 in zip(
255 v.size(2) - (v[0] * (v.size(2) - av)).max(dim=-1).values,
256 h.size(2) - (h[0] * (h.size(2) - ah)).max(dim=-1).values,
257 (v[0] * av).max(dim=-1).values,
258 (h[0] * ah).max(dim=-1).values,
# rec_coo_ fragment: the "lame" pure-python variant kept for reference —
# draws random corners, accumulates them on a collision grid, and retries
# (loop header in a missing line) until n rectangles overlap nowhere.
262 def rec_coo_(self, x, n, min_height=3, min_width=3):
263 collision = x.new(x.size())
269 i1, i2 = torch.randint(x.size(0), (2,))
270 if i1 + min_height <= i2:
273 j1, j2 = torch.randint(x.size(1), (2,))
274 if j1 + min_width <= j2:
276 collision[i1:i2, j1:j2] += 1
277 if collision.max() > 1:
279 result.append((i1, j1, i2, j2))
280 if collision.max() == 1:
284 ######################################################################
# Task fragments. Common shape: each task fills the example pair (A, f_A)
# and the test pair (B, f_B) in-place with nb_rec colored rectangles, where
# f_* shows the transformed version. `nb_rec` is assigned in missing lines
# in every task — presumably a small constant or randint; TODO confirm.
#
# replace_color: identical scenes, except rectangle 0's color is replaced
# (c[-1] is the extra color drawn for that purpose).
286 def task_replace_color(self, A, f_A, B, f_B):
288 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
289 for X, f_X in [(A, f_A), (B, f_B)]:
290 r = self.rec_coo(nb_rec)
291 for n in range(nb_rec):
292 i1, j1, i2, j2 = r[n]
293 X[i1:i2, j1:j2] = c[n]
294 f_X[i1:i2, j1:j2] = c[n if n > 0 else -1]
# move: translate one rectangle by (di, dj) in {-1,0,1}^2; the visible
# condition fragment checks the shifted rectangle stays inside the grid,
# otherwise it is drawn unmoved.
296 def task_move(self, A, f_A, B, f_B):
297 di, dj = torch.randint(3, (2,)) - 1
299 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
300 for X, f_X in [(A, f_A), (B, f_B)]:
302 r = self.rec_coo(nb_rec)
303 i1, j1, i2, j2 = r[nb_rec - 1]
306 and i2 + di < X.size(0)
308 and j2 + dj < X.size(1)
312 for n in range(nb_rec):
313 i1, j1, i2, j2 = r[n]
314 X[i1:i2, j1:j2] = c[n]
316 f_X[i1 + di : i2 + di, j1 + dj : j2 + dj] = c[n]
318 f_X[i1:i2, j1:j2] = c[n]
# grow: one rectangle either grows or shrinks by 1 on all sides
# (direction flag), the others are copied unchanged; requires the chosen
# rectangle to be at least 4x4 so the shrunken form is non-empty.
320 def task_grow(self, A, f_A, B, f_B):
321 di, dj = torch.randint(2, (2,)) * 2 - 1
323 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1
324 direction = torch.randint(2, (1,))
325 for X, f_X in [(A, f_A), (B, f_B)]:
327 r = self.rec_coo(nb_rec)
328 i1, j1, i2, j2 = r[nb_rec - 1]
329 if i1 + 3 < i2 and j1 + 3 < j2:
332 for n in range(nb_rec):
333 i1, j1, i2, j2 = r[n]
336 X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
337 f_X[i1:i2, j1:j2] = c[n]
339 X[i1:i2, j1:j2] = c[n]
340 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = c[n]
342 X[i1:i2, j1:j2] = c[n]
343 f_X[i1:i2, j1:j2] = c[n]
# color_grow: each rectangle carries a mid-line in a second color; in f_X
# the second color floods half of the rectangle from that line, with one
# of four directions (the original author's "not my proudest moment").
345 def task_color_grow(self, A, f_A, B, f_B):
346 di, dj = torch.randint(2, (2,)) * 2 - 1
348 c = torch.randperm(len(self.colors) - 1)[: 2 * nb_rec] + 1
349 direction = torch.randint(4, (1,))
350 for X, f_X in [(A, f_A), (B, f_B)]:
351 r = self.rec_coo(nb_rec)
352 for n in range(nb_rec):
353 i1, j1, i2, j2 = r[n]
354 X[i1:i2, j1:j2] = c[2 * n]
355 f_X[i1:i2, j1:j2] = c[2 * n]
356 # Not my proudest moment
# `i` for this branch is assigned in a missing line (midpoint row).
359 X[i : i + 1, j1:j2] = c[2 * n + 1]
361 f_X[i:i2, j1:j2] = c[2 * n + 1]
363 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
365 i = (i1 + i2 - 1) // 2
366 X[i : i + 1, j1:j2] = c[2 * n + 1]
368 f_X[i1 : i + 1, j1:j2] = c[2 * n + 1]
370 f_X[i : i + 1, j1:j2] = c[2 * n + 1]
# Column variants of the same pattern; `j` midpoint below.
373 X[i1:i2, j : j + 1] = c[2 * n + 1]
375 f_X[i1:i2, j:j2] = c[2 * n + 1]
377 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
379 j = (j1 + j2 - 1) // 2
380 X[i1:i2, j : j + 1] = c[2 * n + 1]
382 f_X[i1:i2, j1 : j + 1] = c[2 * n + 1]
384 f_X[i1:i2, j : j + 1] = c[2 * n + 1]
# frame: filled rectangles in X become hollow frames in f_X (interior
# cleared to 0); a condition selecting which rectangle(s) is in a gap.
386 def task_frame(self, A, f_A, B, f_B):
388 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
389 for X, f_X in [(A, f_A), (B, f_B)]:
390 r = self.rec_coo(nb_rec)
391 for n in range(nb_rec):
392 i1, j1, i2, j2 = r[n]
393 X[i1:i2, j1:j2] = c[n]
394 f_X[i1:i2, j1:j2] = c[n]
396 f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = 0
# detect: draws rectangles in X; the f_X marking (presumably a single
# corner cell per rectangle) is in missing lines.
398 def task_detect(self, A, f_A, B, f_B):
400 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
401 for X, f_X in [(A, f_A), (B, f_B)]:
402 r = self.rec_coo(nb_rec)
403 for n in range(nb_rec):
404 i1, j1, i2, j2 = r[n]
405 X[i1:i2, j1:j2] = c[n]
409 ######################################################################
# generate_prompts_and_answers fragment: allocates nb prompts (three
# width-wide frames A | f_A | B) and answers, then fills each in-place
# with a randomly chosen task. The full `tasks` list, `w`, and the call
# `task(A, f_A, B, f_B)` are in missing lines; returns flattened tensors.
411 def generate_prompts_and_answers(self, nb, device="cpu"):
413 self.task_replace_color,
416 self.task_color_grow,
420 prompts = torch.zeros(nb, self.height, self.width * 3, dtype=torch.int64)
421 answers = torch.zeros(nb, self.height, self.width, dtype=torch.int64)
424 for prompt, answer in tqdm.tqdm(
425 zip(prompts, answers),
427 desc="world generation",
428 total=prompts.size(0),
430 A = prompt[:, 0 * w : 1 * w]
431 f_A = prompt[:, 1 * w : 2 * w]
432 B = prompt[:, 2 * w : 3 * w]
434 task = tasks[torch.randint(len(tasks), (1,))]
437 return prompts.flatten(1), answers.flatten(1)
# Fragment of another method signature (presumably the public
# save_quizzes wrapper that forwards to the renderer above) — its `def`
# line and the remaining parameters/body are in missing lines.
445 predicted_prompts=None,
446 predicted_answers=None,
450 filename_prefix + ".png",
458 ######################################################################
# Demo / micro-benchmark entry point (fragment — the listing's embedded line
# numbers jump, so several statements, including the rest of the
# save_quizzes(...) call and the `import time`, are missing from this view).
460 if __name__ == "__main__":
463 reasoning = Reasoning()
# Time the generation of 100 quizzes and report throughput.
465 start_time = time.perf_counter()
466 prompts, answers = reasoning.generate_prompts_and_answers(100)
467 delay = time.perf_counter() - start_time
# NOTE(review): format spec "02f" sets a minimum *width* of 0, not a
# precision — ".02f" was probably intended; confirm before changing.
468 print(f"{prompts.size(0)/delay:02f} seq/s")
# Random complementary masks, only to exercise the gray/white
# "predicted" background tinting in save_quizzes.
470 predicted_prompts = torch.rand(prompts.size(0)) < 0.5
471 predicted_answers = torch.logical_not(predicted_prompts)
473 reasoning.save_quizzes(
478 # You can add a bool to put a frame around the predicted parts
479 predicted_prompts[:64],
480 predicted_answers[:64],