def policy_batches(self, split="train", nb_to_use=-1):
    """Yield (input, policy-target) batch pairs for one data split.

    Args:
        split: Either "train" or "test"; selects which stored input /
            policy tensors to read from ``self``.
        nb_to_use: If positive, only the first ``nb_to_use`` samples are
            used; the default -1 keeps all of them.

    Yields:
        Tuples ``(inputs, targets)`` of chunks of size ``self.batch_size``
        (the last chunk may be smaller), with a tqdm progress bar around
        the iteration.
    """
    assert split in {"train", "test"}
    # Renamed from `input` to avoid shadowing the builtin of the same name.
    inputs = self.train_input if split == "train" else self.test_input
    targets = self.train_policies if split == "train" else self.test_policies
    # Keep only the first height*width tokens of each sequence —
    # presumably the maze-grid portion; confirm against the encoder.
    inputs = inputs[:, : self.height * self.width]
    # Flatten the last two target dimensions, then zero the targets at
    # every position whose input token is a wall (boolean mask broadcast
    # over the policy dimension via the inserted axis).
    targets = targets.flatten(-2) * (inputs != maze.v_wall)[:, None]

    if nb_to_use > 0:
        inputs = inputs[:nb_to_use]
        targets = targets[:nb_to_use]

    for batch in tqdm.tqdm(
        zip(inputs.split(self.batch_size), targets.split(self.batch_size)),
        dynamic_ncols=True,
        desc=f"epoch-{split}",
    ):
        yield batch