parser.add_argument("--maze_nb_walls", type=int, default=15)
+##############################
+# Snake options
+
+# Grid dimensions (in cells) of the snake world.
+parser.add_argument("--snake_height", type=int, default=6)
+
+parser.add_argument("--snake_width", type=int, default=8)
+
+# Number of distinct cell colors in the generated worlds.
+parser.add_argument("--snake_nb_colors", type=int, default=3)
+
+# Number of moves per generated sequence; each move contributes two tokens
+# (cell color, direction), so sequences are 2 * snake_length tokens long.
+parser.add_argument("--snake_length", type=int, default=100)
+
######################################################################
args = parser.parse_args()
######################################################################
+
+def generate_snake_sequences(
+    nb, height, width, nb_colors, length, device=torch.device("cpu")
+):
+    """Generate `nb` random snake walks over random color grids.
+
+    Returns (sequences, worlds): `sequences` is (nb, 2*length) int64 holding
+    interleaved (cell color, chosen direction) token pairs; `worlds` is
+    (nb, height, width) with cell colors drawn uniformly from [0, nb_colors).
+    """
+    worlds = torch.randint(nb_colors, (nb, height, width), device=device)
+    # nb x 2
+    snake_position = torch.cat(
+        (
+            torch.randint(height, (nb, 1), device=device),
+            torch.randint(width, (nb, 1), device=device),
+        ),
+        1,
+    )
+    # Direction encoding (see vh/vw below): 0 = up, 1 = left, 2 = down, 3 = right.
+    snake_direction = torch.randint(4, (nb,), device=device)
+    sequences = torch.empty(nb, 2 * length, device=device, dtype=torch.int64)
+    count = torch.arange(nb, device=device)  # [:,None]
+
+    for l in range(length):
+        # Candidate headings for this step: turn left, go straight, turn right.
+        # nb x 3
+        snake_next_direction = torch.cat(
+            (
+                (snake_direction[:, None] - 1) % 4,
+                snake_direction[:, None],
+                (snake_direction[:, None] + 1) % 4,
+            ),
+            1,
+        )
+
+        # Map a direction d to a (dh, dw) unit step:
+        # d=0 -> (-1, 0), d=1 -> (0, -1), d=2 -> (+1, 0), d=3 -> (0, +1).
+        # nb x 3
+        vh = (snake_next_direction + 1) % 2 * (snake_next_direction - 1)
+        vw = snake_next_direction % 2 * (snake_next_direction - 2)
+
+        # nb x 3 x 2
+        snake_next_speed = torch.cat((vh[:, :, None], vw[:, :, None]), 2)
+        snake_next_position = snake_position[:, None, :] + snake_next_speed
+
+        # 1.0 where the candidate move stays inside the grid, else 0.0.
+        # nb x 3
+        val = torch.logical_and(
+            torch.logical_and(
+                snake_next_position[:, :, 0] >= 0, snake_next_position[:, :, 0] < height
+            ),
+            torch.logical_and(
+                snake_next_position[:, :, 1] >= 0, snake_next_position[:, :, 1] < width
+            ),
+        ).float()
+        # Random scores: going straight is weighted 4x over turning; invalid
+        # moves are zeroed. NOTE(review): if all three candidates were out of
+        # bounds (only possible on degenerate 1-cell-wide grids) the argmax
+        # below would still pick index 0 and the snake would leave the grid.
+        val = (
+            torch.rand_like(val) * val * torch.tensor([[1.0, 4.0, 1.0]], device=device)
+        )
+
+        # Pick the best-scoring candidate heading per sample.
+        # nb
+        i = torch.arange(val.size(0), device=device)
+        j = val.argmax(1)
+        snake_direction = snake_next_direction[i, j]
+
+        # Emit (color under the head, direction taken) for this step.
+        sequences[:, 2 * l] = worlds[count, snake_position[:, 0], snake_position[:, 1]]
+        sequences[:, 2 * l + 1] = snake_direction
+
+        # nb x 2
+        snake_position = snake_next_position[i, j]
+
+    return sequences, worlds
+
+    # print(snake_position)
+
+
+# generate_snake_sequences(nb=1, height=4, width=6, nb_colors=3, length=20)
+# exit(0)
+
+
class TaskSnake(Task):
    def __init__(
        self,
+        nb_train_samples,
+        nb_test_samples,
        batch_size,
        height,
        width,
-        nb_walls,
+        nb_colors,
+        length,
        device=torch.device("cpu"),
    ):
+        # Pre-generates nb_train_samples / nb_test_samples snake sequences of
+        # 2*length tokens each on a height x width grid with nb_colors colors.
+        # NOTE: nb_train_samples / nb_test_samples were used in the body and
+        # passed by the call site but missing from the signature — added here,
+        # otherwise construction raises TypeError.
        self.batch_size = batch_size
+        self.height = height
        self.width = width
        self.device = device
-        # self.train_input =
-        # self.test_input =
+        self.train_input, self.train_worlds = generate_snake_sequences(
+            nb_train_samples, height, width, nb_colors, length, self.device
+        )
+        self.test_input, self.test_worlds = generate_snake_sequences(
+            nb_test_samples, height, width, nb_colors, length, self.device
+        )
-        self.nb_codes = max(self.train_input.max(), self.train_input.max()) + 1
+        # Vocabulary must cover both splits (the old line checked train twice).
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
):
yield batch
+    def vocabulary_size(self):
+        # One more than the largest token id present in the train/test
+        # inputs (computed in __init__ as nb_codes).
+        return self.nb_codes
+
######################################################################
        device=device,
    )
+elif args.task == "snake":
+    # Instantiate the snake task from the --snake_* command-line options.
+    task = TaskSnake(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        height=args.snake_height,
+        width=args.snake_width,
+        nb_colors=args.snake_nb_colors,
+        length=args.snake_length,
+        device=device,
+    )
+
else:
    raise ValueError(f"Unknown task {args.task}")