+ marker_size = 16
+
+ separator = img_prompts.new_full(
+ (
+ img_prompts.size(0),
+ img_prompts.size(1),
+ img_prompts.size(2),
+ marker_size,
+ ),
+ 255,
+ )
+
+ separator[:, :, 0] = 0
+ separator[:, :, h - 1] = 0
+
+ for k in range(1, 2 * marker_size - 8):
+ i = k - (marker_size - 4)
+ j = marker_size - 5 - abs(i)
+ separator[:, :, h // 2 - 1 + i, 2 + j] = 0
+ separator[:, :, h // 2 - 1 + i + 1, 2 + j] = 0
+
+ img = torch.cat([img_prompts, separator, img_answers], dim=3)
+
+ image_name = os.path.join(result_dir, filename)
+ torchvision.utils.save_image(
+ img.float() / 255.0, image_name, nrow=6, padding=margin * 4, pad_value=1.0
+ )
+
+ ######################################################################
+
def nb_token_values(self):
    """Return the number of distinct token values (the size of self.colors)."""
    # Tokens index into the color palette, so the vocabulary size is its length.
    nb_colors = len(self.colors)
    return nb_colors
+
def generate_prompts_and_answers(self, nb):
    """Generate nb quizzes and split each into a (prompt, answer) pair.

    Each generated frame sequence is split along its first (time) dimension:
    the first half of the frames becomes the prompt, the second half the
    answer. Both halves are flattened to 2D tensors of shape (nb, -1).

    Args:
        nb: number of quizzes to generate.

    Returns:
        A pair (prompts, answers) of 2D tensors, one row per quiz.
    """
    frame_sequences = self.generate_frame_sequences(nb)
    # Stack the individual sequences into one batch tensor of shape
    # (nb, T, ...) by adding a leading batch axis to each.
    frame_sequences = torch.cat([x[None] for x in frame_sequences], dim=0)

    # First half of the frames -> prompt, second half -> answer.
    half = frame_sequences.size(1) // 2
    prompts = frame_sequences[:, :half].flatten(1)
    answers = frame_sequences[:, half:].flatten(1)

    return prompts, answers
+
def save_quizzes(
    self,
    result_dir,
    filename_prefix,
    prompts,
    answers,
    predicted_prompts=None,
    predicted_answers=None,
):
    """Save the given quizzes as a PNG image via self.save_image.

    The output file is named filename_prefix + ".png" inside result_dir;
    all remaining arguments are forwarded unchanged.
    """
    image_filename = filename_prefix + ".png"
    self.save_image(
        result_dir,
        image_filename,
        prompts,
        answers,
        predicted_prompts,
        predicted_answers,
    )