answers,
predicted_prompts=None,
predicted_answers=None,
+ nrow=4,
):
prompts = prompts.reshape(prompts.size(0), self.height, -1)
answers = answers.reshape(answers.size(0), self.height, -1)
y[...] = c
else:
c = c.long()[:, None]
- c = c * torch.tensor([192, 192, 192], device=c.device) + (
- 1 - c
- ) * torch.tensor([255, 255, 255], device=c.device)
+ c = (
+ (1 - ((c == 1).long() + (c == 0).long() + (c == -1).long()))
+ * torch.tensor([192, 192, 192], device=c.device)
+ + (c == 1).long() * torch.tensor([0, 255, 0], device=c.device)
+ + (c == 0).long() * torch.tensor([255, 255, 255], device=c.device)
+ + (c == -1).long() * torch.tensor([255, 0, 0], device=c.device)
+ )
y[...] = c[:, :, None, None]
y[:, :, di : di + x.size(2), dj : dj + x.size(3)] = x
image_name = os.path.join(result_dir, filename)
torchvision.utils.save_image(
- img.float() / 255.0, image_name, nrow=4, padding=margin * 4, pad_value=1.0
+ img.float() / 255.0,
+ image_name,
+ nrow=nrow,
+ padding=margin * 4,
+ pad_value=1.0,
)
######################################################################
if l > 3:
break
+ # New task generator: draws up to three random rectangles in a quadrant of X
+ # (offset by a random anchor (i, j)) and reproduces each rectangle scaled 2x,
+ # anchored at the origin, in f_X; the anchor cell in X and its 2x2 image in
+ # f_X are marked with a second color.
+ def task_scale(self, A, f_A, B, f_B):
+ # two distinct color indices drawn from 1 .. len(self.colors)-1
+ # (presumably index 0 is the background color — TODO confirm)
+ c = torch.randperm(len(self.colors) - 1)[:2] + 1
+
+ # random anchor of the half-size working area inside X
+ i, j = torch.randint(self.height // 2, (1,)), torch.randint(
+ self.width // 2, (1,)
+ )
+
+ for X, f_X in [(A, f_A), (B, f_B)]:
+ for _ in range(3):
+ while True:
+ i1, j1 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
+ self.width // 2 + 1, (1,)
+ )
+ i2, j2 = torch.randint(self.height // 2 + 1, (1,)), torch.randint(
+ self.width // 2 + 1, (1,)
+ )
+ # rejection-sample a non-empty rectangle whose smaller side is <= 3
+ # (so the 2x copy still fits in the full grid)
+ if i1 < i2 and j1 < j2 and min(i2 - i1, j2 - j1) <= 3:
+ break
+ # draw at the anchored position in X, and doubled from (0, 0) in f_X
+ X[i + i1 : i + i2, j + j1 : j + j2] = c[0]
+ f_X[2 * i1 : 2 * i2, 2 * j1 : 2 * j2] = c[0]
+
+ # mark the anchor: one cell in X, its 2x2 scaled image in f_X
+ X[i, j] = c[1]
+ f_X[0:2, 0:2] = c[1]
+
######################################################################
- def generate_prompts_and_answers(self, nb, device="cpu"):
- tasks = [
+ # Factor the task list out of generate_prompts_and_answers so callers can
+ # select a subset of tasks; the new task_scale is appended to the list.
+ def all_tasks(self):
+ return [
self.task_replace_color,
self.task_translate,
self.task_grow,
self.task_count,
self.task_trajectory,
self.task_bounce,
+ self.task_scale,
]
+
+ def generate_prompts_and_answers(self, nb, tasks=None, device="cpu"):
+ if tasks is None:
+ tasks = self.all_tasks()
+
prompts = torch.zeros(nb, self.height, self.width * 3, dtype=torch.int64)
answers = torch.zeros(nb, self.height, self.width, dtype=torch.int64)
w = self.width
answers,
predicted_prompts=None,
predicted_answers=None,
+ nrow=4,
):
self.save_image(
result_dir,
answers,
predicted_prompts,
predicted_answers,
+ nrow,
)
if __name__ == "__main__":
import time
+ # sample count shared by the per-task dumps and the demo/benchmark below
+ nb = 4
+
reasoning = Reasoning()
+ # dump one small quiz image per task to /tmp, one file per task name,
+ # with a single quiz per row for easy visual inspection
+ for t in reasoning.all_tasks():
+ print(t.__name__)
+ prompts, answers = reasoning.generate_prompts_and_answers(nb, tasks=[t])
+ reasoning.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=1)
+
+ # NOTE(review): this exit(0) makes the throughput benchmark and the
+ # save_quizzes demo below unreachable — confirm this debug short-circuit
+ # is intended to stay in
+ exit(0)
+
start_time = time.perf_counter()
- prompts, answers = reasoning.generate_prompts_and_answers(100)
+ prompts, answers = reasoning.generate_prompts_and_answers(nb)
delay = time.perf_counter() - start_time
print(f"{prompts.size(0)/delay:02f} seq/s")
- predicted_prompts = torch.rand(prompts.size(0)) < 0.5
- predicted_answers = torch.logical_not(predicted_prompts)
+ # m = torch.randint(2, (prompts.size(0),))
+ # predicted_prompts = m * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
+ # predicted_answers = (1 - m) * (torch.randint(2, (prompts.size(0),)) * 2 - 1)
reasoning.save_quizzes(
"/tmp",
"test",
- prompts[:64],
- answers[:64],
# You can add a bool to put a frame around the predicted parts
- # predicted_prompts[:64],
- # predicted_answers[:64],
)