predicted_prompts=None,
predicted_answers=None,
nrow=4,
+ margin=8,
):
S = self.height * self.width
As = prompts[:, 0 * (S + 1) : 0 * (S + 1) + S].view(-1, self.height, self.width)
return y
- margin = 8
-
img_prompts = torch.cat(
[
add_frame(
def nb_token_values(self):
    # Vocabulary size for the model: one token per color in the palette.
    return len(self.colors)
+ @torch.compile
+ def rec_coo_(self, nb_rec, min_height=3, min_width=3):
+ @torch.compile
+ def overlap(ia, ja, ib, jb):
+ return (
+ ia[1] >= ib[0] and ia[0] <= ib[1] and ja[1] >= jb[0] and ja[0] <= jb[1]
+ )
+
+ if nb_rec == 3:
+ while True:
+ i = torch.randint(self.height + 1, (nb_rec, 2)).sort(dim=1).values
+ j = torch.randint(self.width + 1, (nb_rec, 2)).sort(dim=1).values
+ if (
+ not (
+ overlap(i[0], j[0], i[1], j[1])
+ or overlap(i[0], j[0], i[2], j[2])
+ or overlap(i[1], j[1], i[2], j[2])
+ )
+ and (i[:, 1] - i[:, 0]).min() >= min_height
+ and (j[:, 1] - j[:, 0]).min() >= min_width
+ ):
+ break
+ return (
+ (i[0, 0], j[0, 0], i[0, 1], j[0, 1]),
+ (i[1, 0], j[1, 0], i[1, 1], j[1, 1]),
+ (i[2, 0], j[2, 0], i[2, 1], j[2, 1]),
+ )
+
# That's quite a tensorial spaghetti mess to sample
# non-overlapping rectangles quickly, but it made the generation of
# 100k samples go from 1h50 with lame pure Python code to 3min30s
# with this one.
+ @torch.compile
def rec_coo(self, nb_rec, min_height=3, min_width=3):
nb_trials = 200
)
]
+ @torch.compile
def rec_coo_(self, x, n, min_height=3, min_width=3):
collision = x.new(x.size())
while True:
######################################################################
+ @torch.compile
def task_replace_color(self, A, f_A, B, f_B):
nb_rec = 3
c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
X[i1:i2, j1:j2] = c[n]
f_X[i1:i2, j1:j2] = c[n if n > 0 else -1]
+ @torch.compile
def task_translate(self, A, f_A, B, f_B):
di, dj = torch.randint(3, (2,)) - 1
nb_rec = 3
else:
f_X[i1:i2, j1:j2] = c[n]
+ @torch.compile
def task_grow(self, A, f_A, B, f_B):
di, dj = torch.randint(2, (2,)) * 2 - 1
nb_rec = 3
X[i1:i2, j1:j2] = c[n]
f_X[i1:i2, j1:j2] = c[n]
+ @torch.compile
def task_color_grow(self, A, f_A, B, f_B):
di, dj = torch.randint(2, (2,)) * 2 - 1
nb_rec = 3
else:
f_X[i1:i2, j : j + 1] = c[2 * n + 1]
+ @torch.compile
def task_frame(self, A, f_A, B, f_B):
nb_rec = 3
c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
if n == nb_rec - 1:
f_X[i1 + 1 : i2 - 1, j1 + 1 : j2 - 1] = 0
+ @torch.compile
def task_detect(self, A, f_A, B, f_B):
nb_rec = 3
c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
if n < nb_rec - 1:
f_X[i1, j1] = c[-1]
+ @torch.compile
def contact(self, X, i, j, q):
nq, nq_diag = 0, 0
no = 0
return no, nq, nq_diag
+ @torch.compile
def task_count(self, A, f_A, B, f_B):
- N = torch.randint(4, (1,)) + 2
+ N = (torch.randint(4, (1,)) + 2).item()
c = torch.randperm(len(self.colors) - 1)[:N] + 1
for X, f_X in [(A, f_A), (B, f_B)]:
for j in range(nb[n]):
f_X[n, j] = c[n]
+ @torch.compile
def task_trajectory(self, A, f_A, B, f_B):
c = torch.randperm(len(self.colors) - 1)[:2] + 1
for X, f_X in [(A, f_A), (B, f_B)]:
f_X[i + k * di, j + k * dj] = c[min(k, 1)]
k += 1
+ @torch.compile
def task_bounce(self, A, f_A, B, f_B):
c = torch.randperm(len(self.colors) - 1)[:3] + 1
for X, f_X in [(A, f_A), (B, f_B)]:
+ @torch.compile
def free(i, j):
return (
i >= 0
if l > 3:
break
+ @torch.compile
def task_scale(self, A, f_A, B, f_B):
c = torch.randperm(len(self.colors) - 1)[:2] + 1
X[i, j] = c[1]
f_X[0:2, 0:2] = c[1]
+ @torch.compile
def task_symbols(self, A, f_A, B, f_B):
nb_rec = 4
c = torch.randperm(len(self.colors) - 1)[: nb_rec + 1] + 1
f_X[i[0] : i[0] + delta, j[0] : j[0] + delta] = c[q]
+ @torch.compile
def task_ortho(self, A, f_A, B, f_B):
nb_rec = 3
di, dj = torch.randint(3, (2,)) - 1
):
break
+ @torch.compile
def task_islands(self, A, f_A, B, f_B):
pass
f_Bs = answers
return (Bs == f_Bs).long().min(dim=-1).values > 0
- def generate_prompts_and_answers(self, nb, tasks=None, device="cpu"):
+ def generate_prompts_and_answers(
+ self, nb, tasks=None, progress_bar=False, device="cpu"
+ ):
if tasks is None:
tasks = self.all_tasks()
prompts = torch.zeros(nb, 3 * S + 2, dtype=torch.int64)
answers = torch.zeros(nb, S, dtype=torch.int64)
- for prompt, answer in tqdm.tqdm(
- zip(prompts, answers),
- dynamic_ncols=True,
- desc="world generation",
- total=prompts.size(0),
- ):
+ bunch = zip(prompts, answers)
+
+ if progress_bar:
+ bunch = tqdm.tqdm(
+ bunch,
+ dynamic_ncols=True,
+ desc="world generation",
+ total=prompts.size(0),
+ )
+
+ for prompt, answer in bunch:
A = prompt[0 * (S + 1) : 0 * (S + 1) + S].view(self.height, self.width)
f_A = prompt[1 * (S + 1) : 1 * (S + 1) + S].view(self.height, self.width)
B = prompt[2 * (S + 1) : 2 * (S + 1) + S].view(self.height, self.width)
if __name__ == "__main__":
import time
- nb = 48
-
grids = Grids()
- # for t in grids.all_tasks():
- for t in [grids.task_ortho]:
- print(t.__name__)
- prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t])
- grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=4)
+ if False:
+ nb = 8
- exit(0)
+ for t in grids.all_tasks():
+ # for t in [grids.task_ortho]:
+ print(t.__name__)
+ prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t])
+ grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=2)
+
+ exit(0)
- nb = 72
+ nb = 500
- start_time = time.perf_counter()
- prompts, answers = grids.generate_prompts_and_answers(nb)
- delay = time.perf_counter() - start_time
- print(f"{prompts.size(0)/delay:02f} seq/s")
+ for t in grids.all_tasks():
+ start_time = time.perf_counter()
+ prompts, answers = grids.generate_prompts_and_answers(nb, tasks=[t])
+ delay = time.perf_counter() - start_time
+ print(f"{t.__name__} {prompts.size(0)/delay:02f} seq/s")
+
+ exit(0)
m = torch.randint(2, (prompts.size(0),))
predicted_prompts = m * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
# Written by Francois Fleuret <francois@fleuret.org>
+import threading, queue, torch
+
class Problem:
def nb_token_values(self):
pass
+ def trivial_prompts_and_answers(self, prompts, answers):
+ pass
+
# returns two tensors nb x D and nb x D'
def generate_prompts_and_answers(self, nb):
pass
filename_prefix,
prompts,
answers,
- predicted_prompt=None,
+ predicted_prompts=None,
predicted_answers=None,
):
pass
+
+
class MultiThreadProblem:
    """Wrap a Problem and pre-generate samples in a background thread.

    A daemon thread repeatedly asks the wrapped problem for chunks of
    chunk_size (prompts, answers) pairs and stores them in a bounded
    queue, so generate_prompts_and_answers() mostly just concatenates
    already-computed chunks instead of generating on demand.
    """

    def __init__(self, problem, max_nb_cached_chunks, chunk_size):
        self.problem = problem
        self.chunk_size = chunk_size
        # Bounded queue: the producer blocks instead of growing memory.
        self.queue = queue.Queue(maxsize=max_nb_cached_chunks)
        threading.Thread(target=self.fill_cache, daemon=True).start()
        # Surplus (prompts, answers) tensors left over from the previous
        # request, consumed first by the next one.
        self.rest = None

    def nb_token_values(self):
        return self.problem.nb_token_values()

    def save_quizzes(
        self,
        result_dir,
        filename_prefix,
        prompts,
        answers,
        predicted_prompts=None,
        predicted_answers=None,
    ):
        # BUG FIX: forward the predicted_* arguments; the original
        # hard-coded None, silently discarding the caller's values.
        self.problem.save_quizzes(
            result_dir,
            filename_prefix,
            prompts,
            answers,
            predicted_prompts=predicted_prompts,
            predicted_answers=predicted_answers,
        )

    def fill_cache(self):
        # Producer loop, runs forever in the daemon thread; blocks on a
        # full queue until the consumer drains a chunk.
        while True:
            prompts, answers = self.problem.generate_prompts_and_answers(
                self.chunk_size
            )
            self.queue.put((prompts, answers), block=True)

    def trivial_prompts_and_answers(self, prompts, answers):
        return self.problem.trivial_prompts_and_answers(prompts, answers)

    def generate_prompts_and_answers(self, nb):
        """Return exactly nb rows of (prompts, answers), from cached chunks.

        BUG FIX: the original read the bare name `rest` (NameError once
        self.rest was set), never wrote the surplus back to self.rest, and
        unpacked the (tensor, tensor) leftover into variables the rest of
        the function treats as lists.
        """
        if self.rest is not None:
            prompts, answers = [self.rest[0]], [self.rest[1]]
        else:
            prompts, answers = [], []

        self.rest = None

        n = sum(p.size(0) for p in prompts)

        while n < nb:
            p, a = self.queue.get(block=True)
            prompts.append(p)
            answers.append(a)
            n += p.size(0)

        prompts = torch.cat(prompts, dim=0)
        answers = torch.cat(answers, dim=0)

        k = n - nb

        if k > 0:
            # Keep the surplus rows for the next call.
            self.rest = (prompts[-k:], answers[-k:])
            prompts, answers = prompts[:-k], answers[:-k]

        return prompts, answers