X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=grids.py;h=20a964b8baeaff1cef3c2b02e89d9ee92a06a8a7;hb=a86dff174205c38d8e90d0d89ea399a6afb36359;hp=912581011438aba0b5e6f3fe942d81602df615e2;hpb=f71b92e23620080cae1aef965c13eaf94338dbc5;p=culture.git diff --git a/grids.py b/grids.py index 9125810..20a964b 100755 --- a/grids.py +++ b/grids.py @@ -32,11 +32,17 @@ class Grids(problem.Problem): ("gray", [128, 128, 128]), ] - def __init__(self, device=torch.device("cpu")): + def __init__( + self, + max_nb_cached_chunks=None, + chunk_size=None, + nb_threads=-1, + ): self.colors = torch.tensor([c for _, c in self.named_colors]) self.height = 10 self.width = 10 - self.device = device + self.cache_rec_coo = {} + super().__init__(max_nb_cached_chunks, chunk_size, nb_threads) ###################################################################### @@ -74,6 +80,7 @@ class Grids(problem.Problem): predicted_prompts=None, predicted_answers=None, nrow=4, + margin=8, ): S = self.height * self.width As = prompts[:, 0 * (S + 1) : 0 * (S + 1) + S].view(-1, self.height, self.width) @@ -109,10 +116,10 @@ class Grids(problem.Problem): c = c.long()[:, None] c = ( (1 - ((c == 1).long() + (c == 0).long() + (c == -1).long())) - * torch.tensor([64, 64, 64], device=c.device) - + (c == 1).long() * torch.tensor([0, 255, 0], device=c.device) - + (c == 0).long() * torch.tensor([255, 255, 255], device=c.device) - + (c == -1).long() * torch.tensor([255, 0, 0], device=c.device) + * torch.tensor([64, 64, 64]) + + (c == 1).long() * torch.tensor([0, 255, 0]) + + (c == 0).long() * torch.tensor([255, 255, 255]) + + (c == -1).long() * torch.tensor([255, 0, 0]) ) y[...] = c[:, :, None, None] @@ -120,8 +127,6 @@ class Grids(problem.Problem): return y - margin = 8 - img_prompts = torch.cat( [ add_frame( @@ -195,112 +200,160 @@ class Grids(problem.Problem): def nb_token_values(self): return len(self.colors) - # That's quite a tensorial spaghetti mess to sample - # non-overlapping rectangles quickly, but made the generation of - # 100k samples go from 1h50 with a lame pure python code to 3min30s - # with this one. 
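
(As an aside, the removed comment above describes the trick that the rec_coo implementations that follow — both the removed one and its replacement — rely on: draw the corners of many candidate rectangles in a single batched call, sort each coordinate pair, and keep only the draws that satisfy the size constraints. Below is a minimal standalone sketch of that idea; the helper name, grid size and candidate count are illustrative, and the non-overlap filtering the real methods perform is omitted.)

import torch

def sample_rects(nb_rec, height=10, width=10, min_height=3, min_width=3, nb_candidates=10000):
    # Draw the two opposite corners of many candidate rectangles at once,
    # sorted so that column 0 holds the smaller coordinate.
    while True:
        i = torch.randint(height, (nb_candidates, 2)).sort(dim=-1).values
        j = torch.randint(width, (nb_candidates, 2)).sort(dim=-1).values
        # Keep only the candidates that meet the minimum height and width.
        keep = (i[:, 1] - i[:, 0] >= min_height) & (j[:, 1] - j[:, 0] >= min_width)
        if keep.long().sum() >= nb_rec:
            break
    i, j = i[keep][:nb_rec], j[keep][:nb_rec]
    return [
        (i[k, 0].item(), j[k, 0].item(), i[k, 1].item(), j[k, 1].item())
        for k in range(nb_rec)
    ]
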
- def rec_coo(self, nb_rec, min_height=3, min_width=3): - nb_trials = 200 + # @torch.compile + def rec_coo( + self, + nb_rec, + min_height=3, + min_width=3, + surface_max=None, + prevent_overlap=False, + ): + if surface_max is None: + surface_max = self.height * self.width // 2 + + signature = (nb_rec, min_height, min_width, surface_max) + try: + return self.cache_rec_coo[signature].pop() + except IndexError: + pass + except KeyError: + pass + + N = 10000 while True: - v = ( - ( - torch.rand(nb_trials * nb_rec, self.height + 1, device=self.device) - .sort(dim=-1) - .indices - < 2 - ) - .long() - .cumsum(dim=1) - == 1 - ).long() + while True: + i = torch.randint(self.height, (N * nb_rec, 2)).sort(dim=-1).values + j = torch.randint(self.width, (N * nb_rec, 2)).sort(dim=-1).values - h = ( - ( - torch.rand(nb_trials * nb_rec, self.width + 1, device=self.device) - .sort(dim=-1) - .indices - < 2 + big_enough = ( + (i[:, 1] >= i[:, 0] + min_height) + & (j[:, 1] >= j[:, 0] + min_height) + & ((i[:, 1] - i[:, 0]) * (j[:, 1] - j[:, 0]) <= surface_max) ) - .long() - .cumsum(dim=1) - == 1 - ).long() - i = torch.logical_and( - v.sum(dim=-1) >= min_height, h.sum(dim=-1) >= min_width - ) - - v, h = v[i], h[i] - v = v[: v.size(0) - v.size(0) % nb_rec] - h = h[: h.size(0) - h.size(0) % nb_rec] - v = v.reshape(v.size(0) // nb_rec, nb_rec, -1) - h = h.reshape(h.size(0) // nb_rec, nb_rec, -1) + i, j = i[big_enough], j[big_enough] - r = v[:, :, :, None] * h[:, :, None, :] + n = i.size(0) - i.size(0) % nb_rec - valid = r.sum(dim=1).flatten(1).max(dim=-1).values == 1 + if n > 0: + break - v = v[valid] - h = h[valid] + i = i[:n].reshape(n // nb_rec, nb_rec, -1) + j = j[:n].reshape(n // nb_rec, nb_rec, -1) + + if prevent_overlap: + can_fit = ((i[:, :, 1] - i[:, :, 0]) * (j[:, :, 1] - j[:, :, 0])).sum( + dim=-1 + ) <= self.height * self.width + i, j = i[can_fit], j[can_fit] + if nb_rec == 2: + A_i1, A_i2, A_j1, A_j2 = ( + i[:, 0, 0], + i[:, 0, 1], + j[:, 0, 0], + j[:, 0, 1], + ) + B_i1, B_i2, B_j1, B_j2 = ( + i[:, 1, 0], + i[:, 1, 1], + j[:, 1, 0], + j[:, 1, 1], + ) + no_overlap = torch.logical_not( + (A_i1 >= B_i2) + & (A_i2 <= B_i1) + & (A_j1 >= B_j1) + & (A_j2 <= B_j1) + ) + i, j = i[no_overlap], j[no_overlap] + elif nb_rec == 3: + A_i1, A_i2, A_j1, A_j2 = ( + i[:, 0, 0], + i[:, 0, 1], + j[:, 0, 0], + j[:, 0, 1], + ) + B_i1, B_i2, B_j1, B_j2 = ( + i[:, 1, 0], + i[:, 1, 1], + j[:, 1, 0], + j[:, 1, 1], + ) + C_i1, C_i2, C_j1, C_j2 = ( + i[:, 2, 0], + i[:, 2, 1], + j[:, 2, 0], + j[:, 2, 1], + ) + no_overlap = ( + ( + (A_i1 >= B_i2) + | (A_i2 <= B_i1) + | (A_j1 >= B_j2) + | (A_j2 <= B_j1) + ) + & ( + (A_i1 >= C_i2) + | (A_i2 <= C_i1) + | (A_j1 >= C_j2) + | (A_j2 <= C_j1) + ) + & ( + (B_i1 >= C_i2) + | (B_i2 <= C_i1) + | (B_j1 >= C_j2) + | (B_j2 <= C_j1) + ) + ) + i, j = (i[no_overlap], j[no_overlap]) + else: + assert nb_rec == 1 - if v.size(0) > 0: + if i.size(0) > 1: break - av = torch.arange(v.size(2), device=self.device)[None, :] - ah = torch.arange(h.size(2), device=self.device)[None, :] - - return [ - (i1.item(), j1.item(), i2.item() + 1, j2.item() + 1) - for i1, j1, i2, j2 in zip( - v.size(2) - (v[0] * (v.size(2) - av)).max(dim=-1).values, - h.size(2) - (h[0] * (h.size(2) - ah)).max(dim=-1).values, - (v[0] * av).max(dim=-1).values, - (h[0] * ah).max(dim=-1).values, - ) + self.cache_rec_coo[signature] = [ + [ + ( + i[n, k, 0].item(), + j[n, k, 0].item(), + i[n, k, 1].item(), + j[n, k, 1].item(), + ) + for k in range(nb_rec) + ] + for n in range(i.size(0)) ] - def rec_coo_(self, x, n, min_height=3, min_width=3): 
- collision = x.new(x.size()) - while True: - collision[...] = 0 - result = [] - for _ in range(n): - while True: - i1, i2 = torch.randint(x.size(0), (2,)) - if i1 + min_height <= i2: - break - while True: - j1, j2 = torch.randint(x.size(1), (2,)) - if j1 + min_width <= j2: - break - collision[i1:i2, j1:j2] += 1 - if collision.max() > 1: - break - result.append((i1, j1, i2, j2)) - if collision.max() == 1: - break - return result + return self.cache_rec_coo[signature].pop() ###################################################################### + # @torch.compile def task_replace_color(self, A, f_A, B, f_B): nb_rec = 3 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1 for X, f_X in [(A, f_A), (B, f_B)]: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) for n in range(nb_rec): i1, j1, i2, j2 = r[n] X[i1:i2, j1:j2] = c[n] f_X[i1:i2, j1:j2] = c[n if n > 0 else -1] + # @torch.compile def task_translate(self, A, f_A, B, f_B): - di, dj = torch.randint(3, (2,)) - 1 + while True: + di, dj = torch.randint(3, (2,)) - 1 + if di.abs() + dj.abs() > 0: + break + nb_rec = 3 c = torch.randperm(len(self.colors) - 1)[:nb_rec] + 1 for X, f_X in [(A, f_A), (B, f_B)]: while True: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) i1, j1, i2, j2 = r[nb_rec - 1] if ( i1 + di >= 0 @@ -318,6 +371,7 @@ class Grids(problem.Problem): else: f_X[i1:i2, j1:j2] = c[n] + # @torch.compile def task_grow(self, A, f_A, B, f_B): di, dj = torch.randint(2, (2,)) * 2 - 1 nb_rec = 3 @@ -325,7 +379,7 @@ class Grids(problem.Problem): direction = torch.randint(2, (1,)) for X, f_X in [(A, f_A), (B, f_B)]: while True: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) i1, j1, i2, j2 = r[nb_rec - 1] if i1 + 3 < i2 and j1 + 3 < j2: break @@ -343,13 +397,14 @@ class Grids(problem.Problem): X[i1:i2, j1:j2] = c[n] f_X[i1:i2, j1:j2] = c[n] + # @torch.compile def task_color_grow(self, A, f_A, B, f_B): di, dj = torch.randint(2, (2,)) * 2 - 1 nb_rec = 3 c = torch.randperm(len(self.colors) - 1)[: 2 * nb_rec] + 1 direction = torch.randint(4, (1,)) for X, f_X in [(A, f_A), (B, f_B)]: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) for n in range(nb_rec): i1, j1, i2, j2 = r[n] X[i1:i2, j1:j2] = c[2 * n] @@ -384,29 +439,36 @@ class Grids(problem.Problem): else: f_X[i1:i2, j : j + 1] = c[2 * n + 1] + # @torch.compile def task_frame(self, A, f_A, B, f_B): nb_rec = 3 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1 for X, f_X in [(A, f_A), (B, f_B)]: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) for n in range(nb_rec): i1, j1, i2, j2 = r[n] X[i1:i2, j1:j2] = c[n] - f_X[i1:i2, j1:j2] = c[n] if n == nb_rec - 1: - f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = 0 + f_X[i1:i2, j1] = c[n] + f_X[i1:i2, j2 - 1] = c[n] + f_X[i1, j1:j2] = c[n] + f_X[i2 - 1, j1:j2] = c[n] + else: + f_X[i1:i2, j1:j2] = c[n] + # @torch.compile def task_detect(self, A, f_A, B, f_B): nb_rec = 3 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1 for X, f_X in [(A, f_A), (B, f_B)]: - r = self.rec_coo(nb_rec) + r = self.rec_coo(nb_rec, prevent_overlap=True) for n in range(nb_rec): i1, j1, i2, j2 = r[n] X[i1:i2, j1:j2] = c[n] if n < nb_rec - 1: f_X[i1, j1] = c[-1] + # @torch.compile def contact(self, X, i, j, q): nq, nq_diag = 0, 0 no = 0 @@ -443,28 +505,63 @@ class Grids(problem.Problem): return no, nq, nq_diag def task_count(self, A, f_A, B, f_B): - N = torch.randint(4, (1,)) + 2 + N = (torch.randint(4, (1,)) + 2).item() c = 
torch.randperm(len(self.colors) - 1)[:N] + 1 for X, f_X in [(A, f_A), (B, f_B)]: + l_q = torch.randperm(self.height * self.width)[ + : self.height * self.width // 20 + ] + l_d = torch.randint(N, l_q.size()) nb = torch.zeros(N, dtype=torch.int64) - q = torch.randint(N, (self.height * self.width,)) - k = torch.randperm(self.height * self.width) - for p in range(self.height * self.width): - i, j = k[p] % self.height, k[p] // self.height - no, nq, nq_diag = self.contact(X, i, j, c[q[p]]) - if no == 0 and nq_diag == 0: - if nq == 0: - if nb[q[p]] < self.width: - X[i, j] = c[q[p]] - nb[q[p]] += 1 - if nq == 1: - X[i, j] = c[q[p]] - - for n in range(N): - for j in range(nb[n]): - f_X[n, j] = c[n] + for q, e in zip(l_q, l_d): + d = c[e] + i, j = q % self.height, q // self.height + if ( + nb[e] < self.width + and X[max(0, i - 1) : i + 2, max(0, j - 1) : j + 2] == 0 + ).all(): + X[i, j] = d + nb[e] += 1 + + l_q = torch.randperm((self.height - 2) * (self.width - 2))[ + : self.height * self.width // 2 + ] + l_d = torch.randint(N, l_q.size()) + for q, e in zip(l_q, l_d): + d = c[e] + i, j = q % (self.height - 2) + 1, q // (self.height - 2) + 1 + a1, a2, a3 = X[i - 1, j - 1 : j + 2] + a8, a4 = X[i, j - 1], X[i, j + 1] + a7, a6, a5 = X[i + 1, j - 1 : j + 2] + if ( + X[i, j] == 0 + and nb[e] < self.width + and (a2 == 0 or a2 == d) + and (a4 == 0 or a4 == d) + and (a6 == 0 or a6 == d) + and (a8 == 0 or a8 == d) + and (a1 == 0 or a2 == d or a8 == d) + and (a3 == 0 or a4 == d or a2 == d) + and (a5 == 0 or a6 == d or a4 == d) + and (a7 == 0 or a8 == d or a6 == d) + ): + o = ( + (a2 != 0).long() + + (a4 != 0).long() + + (a6 != 0).long() + + (a8 != 0).long() + ) + if o <= 1: + X[i, j] = d + nb[e] += 1 - o + + for e in range(N): + for j in range(nb[e]): + f_X[e, j] = c[e] + + # @torch.compile def task_trajectory(self, A, f_A, B, f_B): c = torch.randperm(len(self.colors) - 1)[:2] + 1 for X, f_X in [(A, f_A), (B, f_B)]: @@ -492,10 +589,11 @@ class Grids(problem.Problem): f_X[i + k * di, j + k * dj] = c[min(k, 1)] k += 1 + # @torch.compile def task_bounce(self, A, f_A, B, f_B): c = torch.randperm(len(self.colors) - 1)[:3] + 1 for X, f_X in [(A, f_A), (B, f_B)]: - + # @torch.compile def free(i, j): return ( i >= 0 @@ -555,6 +653,7 @@ class Grids(problem.Problem): if l > 3: break + # @torch.compile def task_scale(self, A, f_A, B, f_B): c = torch.randperm(len(self.colors) - 1)[:2] + 1 @@ -579,6 +678,7 @@ class Grids(problem.Problem): X[i, j] = c[1] f_X[0:2, 0:2] = c[1] + # @torch.compile def task_symbols(self, A, f_A, B, f_B): nb_rec = 4 c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1 @@ -614,6 +714,7 @@ class Grids(problem.Problem): f_X[i[0] : i[0] + delta, j[0] : j[0] + delta] = c[q] + # @torch.compile def task_ortho(self, A, f_A, B, f_B): nb_rec = 3 di, dj = torch.randint(3, (2,)) - 1 @@ -668,8 +769,97 @@ class Grids(problem.Problem): ): break - def task_islands(self, A, f_A, B, f_B): - pass + def compute_distance(self, walls, goal_i, goal_j, start_i, start_j): + max_length = walls.numel() + dist = torch.full_like(walls, max_length) + + dist[goal_i, goal_j] = 0 + pred_dist = torch.empty_like(dist) + + while True: + pred_dist.copy_(dist) + d = ( + torch.cat( + ( + dist[None, 1:-1, 0:-2], + dist[None, 2:, 1:-1], + dist[None, 1:-1, 2:], + dist[None, 0:-2, 1:-1], + ), + 0, + ).min(dim=0)[0] + + 1 + ) + + dist[1:-1, 1:-1].minimum_(d) # = torch.min(dist[1:-1, 1:-1], d) + dist = walls * max_length + (1 - walls) * dist + + if dist[start_i, start_j] < max_length or dist.equal(pred_dist): + return dist * (1 - 
walls) + + # @torch.compile + def task_path(self, A, f_A, B, f_B): + c = torch.randperm(len(self.colors) - 1)[:3] + 1 + dist = torch.empty(self.height + 2, self.width + 2) + for X, f_X in [(A, f_A), (B, f_B)]: + nb_rec = torch.randint(3, (1,)) + 1 + while True: + r = self.rec_coo(nb_rec, prevent_overlap=True) + X[...] = 0 + f_X[...] = 0 + for n in range(nb_rec): + i1, j1, i2, j2 = r[n] + X[i1:i2, j1:j2] = c[0] + f_X[i1:i2, j1:j2] = c[0] + while True: + i0, j0 = torch.randint(self.height, (1,)), torch.randint( + self.width, (1,) + ) + if X[i0, j0] == 0: + break + while True: + i1, j1 = torch.randint(self.height, (1,)), torch.randint( + self.width, (1,) + ) + if X[i1, j1] == 0: + break + dist[...] = 1 + dist[1:-1, 1:-1] = (X != 0).long() + dist[...] = self.compute_distance(dist, i1 + 1, j1 + 1, i0 + 1, j0 + 1) + if dist[i0 + 1, j0 + 1] >= 1 and dist[i0 + 1, j0 + 1] < self.height * 4: + break + + dist[1:-1, 1:-1] += (X != 0).long() * self.height * self.width + dist[0, :] = self.height * self.width + dist[-1, :] = self.height * self.width + dist[:, 0] = self.height * self.width + dist[:, -1] = self.height * self.width + # dist += torch.rand(dist.size()) + + i, j = i0 + 1, j0 + 1 + while i != i1 + 1 or j != j1 + 1: + f_X[i - 1, j - 1] = c[2] + r, s, t, u = ( + dist[i - 1, j], + dist[i, j - 1], + dist[i + 1, j], + dist[i, j + 1], + ) + m = min(r, s, t, u) + if r == m: + i = i - 1 + elif t == m: + i = i + 1 + elif s == m: + j = j - 1 + else: + j = j + 1 + + X[i0, j0] = c[2] + # f_X[i0, j0] = c[1] + + X[i1, j1] = c[1] + f_X[i1, j1] = c[1] # for X, f_X in [(A, f_A), (B, f_B)]: # n = torch.arange(self.height * self.width).reshape(self.height, self.width) @@ -695,7 +885,7 @@ class Grids(problem.Problem): self.task_scale, self.task_symbols, self.task_ortho, - # self.task_islands, + # self.task_path, ] def trivial_prompts_and_answers(self, prompts, answers): @@ -704,7 +894,7 @@ class Grids(problem.Problem): f_Bs = answers return (Bs == f_Bs).long().min(dim=-1).values > 0 - def generate_prompts_and_answers(self, nb, tasks=None, device="cpu"): + def generate_prompts_and_answers_(self, nb, tasks=None, progress_bar=False): if tasks is None: tasks = self.all_tasks() @@ -712,12 +902,17 @@ class Grids(problem.Problem): prompts = torch.zeros(nb, 3 * S + 2, dtype=torch.int64) answers = torch.zeros(nb, S, dtype=torch.int64) - for prompt, answer in tqdm.tqdm( - zip(prompts, answers), - dynamic_ncols=True, - desc="world generation", - total=prompts.size(0), - ): + bunch = zip(prompts, answers) + + if progress_bar: + bunch = tqdm.tqdm( + bunch, + dynamic_ncols=True, + desc="world generation", + total=prompts.size(0), + ) + + for prompt, answer in bunch: A = prompt[0 * (S + 1) : 0 * (S + 1) + S].view(self.height, self.width) f_A = prompt[1 * (S + 1) : 1 * (S + 1) + S].view(self.height, self.width) B = prompt[2 * (S + 1) : 2 * (S + 1) + S].view(self.height, self.width) @@ -747,30 +942,57 @@ class Grids(problem.Problem): nrow, ) + def save_some_examples(self, result_dir): + nb, nrow = 72, 4 + for t in self.all_tasks(): + print(t.__name__) + prompts, answers = self.generate_prompts_and_answers_(nb, tasks=[t]) + self.save_quizzes( + result_dir, t.__name__, prompts[:nb], answers[:nb], nrow=nrow + ) + ###################################################################### if __name__ == "__main__": import time - nb = 48 - + # grids = Grids(max_nb_cached_chunks=5, chunk_size=100, nb_threads=4) grids = Grids() + # nb = 1000 + # grids = problem.MultiThreadProblem( + # grids, max_nb_cached_chunks=50, chunk_size=100, 
nb_threads=1 + # ) + # time.sleep(10) + # start_time = time.perf_counter() + # prompts, answers = grids.generate_prompts_and_answers(nb) + # delay = time.perf_counter() - start_time + # print(f"{prompts.size(0)/delay:02f} seq/s") + # exit(0) + + # if True: + nb, nrow = 72, 4 + # nb, nrow = 8, 2 + # for t in grids.all_tasks(): - for t in [grids.task_ortho]: + for t in [grids.task_path]: print(t.__name__) - prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t]) - grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=4) + prompts, answers = grids.generate_prompts_and_answers_(nb, tasks=[t]) + grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=nrow) - exit(0) + # exit(0) - nb = 72 + nb = 1000 - start_time = time.perf_counter() - prompts, answers = grids.generate_prompts_and_answers(nb) - delay = time.perf_counter() - start_time - print(f"{prompts.size(0)/delay:02f} seq/s") + for t in grids.all_tasks(): + # for t in [ grids.task_replace_color ]: #grids.all_tasks(): + start_time = time.perf_counter() + prompts, answers = grids.generate_prompts_and_answers_(nb, tasks=[t]) + delay = time.perf_counter() - start_time + print(f"{t.__name__} {prompts.size(0)/delay:02f} seq/s") + + exit(0) m = torch.randint(2, (prompts.size(0),)) predicted_prompts = m * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
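
As a closing note, the distance relaxation behind the new compute_distance / task_path pair is easy to exercise on its own. The standalone sketch below (grid size, wall layout and cell positions are made up for illustration) repeats the same four-neighbour update until it converges and reads off the start-to-goal distance; task_path then walks greedily downhill on such a map to trace the answer path.

import torch

# Toy 7x7 grid: 1 = wall, 0 = free, with a wall border and an internal
# wall on row 3, columns 1-4. Goal at (1, 1), start at (5, 5).
walls = torch.zeros(7, 7)
walls[0, :] = walls[-1, :] = walls[:, 0] = walls[:, -1] = 1
walls[3, 1:5] = 1

max_length = walls.numel()
dist = torch.full_like(walls, max_length)
dist[1, 1] = 0  # distance is zero at the goal

while True:
    prev = dist.clone()
    # Every interior cell takes 1 + the smallest distance of its 4 neighbours.
    d = (
        torch.cat(
            (
                dist[None, 1:-1, 0:-2],
                dist[None, 2:, 1:-1],
                dist[None, 1:-1, 2:],
                dist[None, 0:-2, 1:-1],
            ),
            0,
        ).min(dim=0).values
        + 1
    )
    dist[1:-1, 1:-1] = torch.min(dist[1:-1, 1:-1], d)
    dist = walls * max_length + (1 - walls) * dist  # walls stay unreachable
    if dist.equal(prev):
        break

print(dist[5, 5].item())  # 8.0: shortest number of steps from start to goal
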