parser.add_argument("--seed", type=int, default=0)
-parser.add_argument("--nb_epochs", type=int, default=25)
+parser.add_argument("--nb_epochs", type=int, default=None)
parser.add_argument("--batch_size", type=int, default=None)
parser.add_argument("--snake_width", type=int, default=8)
-parser.add_argument("--snake_nb_colors", type=int, default=3)
+parser.add_argument("--snake_nb_colors", type=int, default=5)
parser.add_argument("--snake_length", type=int, default=400)
default_args = {
"picoclvr": {
+ "nb_epochs": 25,
"batch_size": 25,
},
"mnist": {
+ "nb_epochs": 25,
"batch_size": 10,
},
"maze": {
+ "nb_epochs": 25,
"batch_size": 25,
},
"snake": {
+ "nb_epochs": 25,
"batch_size": 20,
},
}
def masked_inplace_autoregression(
model, batch_size, input, ar_mask, forbidden_tokens=None, device=torch.device("cpu")
):
- for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
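+ # tqdm progress bar over the mini-batches (tqdm is assumed to be imported at the top of the file)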
+ for input, ar_mask in tqdm.tqdm(
+ zip(input.split(batch_size), ar_mask.split(batch_size)),
+ dynamic_ncols=True,
+ desc="autoregression",
+ total=(input.size(0) + batch_size - 1) // batch_size,  # ceil, so a final partial batch is counted
+ ):
i = (ar_mask.sum(0) > 0).nonzero()
if i.min() > 0:
model(
######################################################################
-def generate_snake_sequences(
- nb, height, width, nb_colors, length, device=torch.device("cpu")
-):
- worlds = torch.randint(nb_colors, (nb, height, width), device=device)
- nb_prior_visits = torch.zeros(nb, height, width, device=device)
-
- # nb x 2
- snake_position = torch.cat(
- (
- torch.randint(height, (nb, 1), device=device),
- torch.randint(width, (nb, 1), device=device),
- ),
- 1,
- )
- snake_direction = torch.randint(4, (nb,), device=device)
- sequences = torch.empty(nb, 2 * length, device=device, dtype=torch.int64)
- sequences_prior_visits = torch.zeros(
- nb, 2 * length, device=device, dtype=torch.int64
- )
- i = torch.arange(nb, device=device) # [:,None]
-
- for l in range(length):
- # nb x 3
- snake_next_direction = torch.cat(
- (
- (snake_direction[:, None] - 1) % 4,
- snake_direction[:, None],
- (snake_direction[:, None] + 1) % 4,
- ),
- 1,
- )
-
- # nb x 3
- vh = (snake_next_direction + 1) % 2 * (snake_next_direction - 1)
- vw = snake_next_direction % 2 * (snake_next_direction - 2)
-
- # nb x 3 x 2
- snake_next_speed = torch.cat((vh[:, :, None], vw[:, :, None]), 2)
- snake_next_position = snake_position[:, None, :] + snake_next_speed
-
- # nb x 3
- val = torch.logical_and(
- torch.logical_and(
- snake_next_position[:, :, 0] >= 0, snake_next_position[:, :, 0] < height
- ),
- torch.logical_and(
- snake_next_position[:, :, 1] >= 0, snake_next_position[:, :, 1] < width
- ),
- ).float()
- val = (
- # The multiplicative factors bias toward moving forward
- torch.rand_like(val)
- * val
- * torch.tensor([[1.0, 2.0, 1.0]], device=device)
- )
-
- # nb
- j = val.argmax(1)
- snake_direction = snake_next_direction[i, j]
-
- sequences[:, 2 * l] = worlds[i, snake_position[:, 0], snake_position[:, 1]] + 4
- sequences_prior_visits[:, 2 * l] = nb_prior_visits[
- i, snake_position[:, 0], snake_position[:, 1]
- ]
- nb_prior_visits[i, snake_position[:, 0], snake_position[:, 1]] += 1
- sequences[:, 2 * l + 1] = snake_direction
-
- # nb x 2
- snake_position = snake_next_position[i, j]
-
- return sequences, sequences_prior_visits
-
-
-# generate_snake_sequences(nb=1, height=4, width=6, nb_colors=3, length=20)
-# exit(0)
+import snake
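+# The snake sequence generator above now lives in its own module. Assumed
+# interface, mirroring the deleted function plus a prompt_length argument:
+#   snake.generate_sequences(nb, height, width, nb_colors, length,
+#       prompt_length, device) -> (sequences, sequences_prior_visits)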
class TaskSnake(Task):
width,
nb_colors,
length,
+ prompt_length,
device=torch.device("cpu"),
):
self.batch_size = batch_size
self.height = height
self.width = width
self.device = device
+ self.prompt_length = prompt_length
- self.train_input, self.train_prior_visits = generate_snake_sequences(
- nb_train_samples, height, width, nb_colors, length, self.device
+ self.train_input, self.train_prior_visits = snake.generate_sequences(
+ nb_train_samples,
+ height,
+ width,
+ nb_colors,
+ length,
+ prompt_length,
+ self.device,
)
- self.test_input, self.test_prior_visits = generate_snake_sequences(
- nb_test_samples, height, width, nb_colors, length, self.device
+ self.test_input, self.test_prior_visits = snake.generate_sequences(
+ nb_test_samples,
+ height,
+ width,
+ nb_colors,
+ length,
+ prompt_length,
+ self.device,
)
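+ # Token ids 0-3 encode directions and colors are offset by 4, so the
+ # vocabulary size is the largest id seen + 1.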
self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
def compute_nb_correct(input, prior_visits):
result = input.clone()
i = torch.arange(result.size(1), device=result.device)[None, :]
- ar_mask = torch.logical_and(i >= i.size(0) // 2, i % 2 == 0).long()
+ ar_mask = (
+ torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
+ .long()
+ .expand_as(result)
+ )
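+ # Even positions carry world-color tokens, odd positions direction tokens;
+ # the mask selects the color tokens past the prompt, which are erased here
+ # and regenerated by the model below.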
result *= 1 - ar_mask
+
+ # snake.solver(result, ar_mask)
+
masked_inplace_autoregression(
model, self.batch_size, result, ar_mask, device=self.device
)
- nb_total = (
- (prior_visits > 0) * ar_mask
- ).sum()
+ nb_total = ((prior_visits > 0) * ar_mask).sum()
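+ # Score only positions whose cell was already visited earlier in the
+ # sequence, since only those colors can be inferred rather than guessed.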
nb_correct = (
(result == input).long() * (prior_visits > 0) * ar_mask
).sum()

return nb_total, nb_correct
- train_nb_total, train_nb_correct = compute_nb_correct(
- self.train_input, self.train_prior_visits
- )
+ # train_nb_total, train_nb_correct = compute_nb_correct(
+ # self.train_input, self.train_prior_visits
+ # )
- log_string(
- f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
- )
+ # log_string(
+ # f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ # )
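+ # Restrict evaluation to the first 1000 test sequences, which bounds the cost of this pass.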
test_nb_total, test_nb_correct = compute_nb_correct(
- self.test_input, self.test_prior_visits
+ self.test_input[:1000], self.test_prior_visits[:1000]
)
log_string(
width=args.snake_width,
nb_colors=args.snake_nb_colors,
length=args.snake_length,
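+ # By default, the first half of each sequence serves as the prompt and the second half is predicted.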
+ prompt_length=args.snake_length // 2,
device=device,
)