-!/usr/bin/env python
+#!/usr/bin/env python
# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/
image_name = os.path.join(args.result_dir, f"picoclvr_result_{n_epoch:04d}.png")
torchvision.utils.save_image(
- img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=1.0
+ img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
)
log_string(f"wrote {image_name}")
def compute_error(self, model, split="train", nb_to_use=-1):
nb_total, nb_correct = 0, 0
count = torch.zeros(
- self.width * self.height, self.width * self.height, device=self.device, dtype=torch.int64
+ self.width * self.height,
+ self.width * self.height,
+ device=self.device,
+ dtype=torch.int64,
)
- for input in task.batches(split, nb_to_use):
+ for input in tqdm.tqdm(
+ self.batches(split, nb_to_use),
+ dynamic_ncols=True,
+ desc="test-mazes",
+ ):
result = input.clone()
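+ # mask out the path part of the sequence (everything after the maze itself)
+ # so that the model has to regenerate it autoregressively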
ar_mask = result.new_zeros(result.size())
ar_mask[:, self.height * self.width :] = 1
result *= 1 - ar_mask
masked_inplace_autoregression(
- model, self.batch_size, result, ar_mask, device=self.device
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
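+ # no per-call progress bar here; the enclosing test loop already reports progress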
+ progress_bar_desc=None,
+ device=self.device,
)
mazes, paths = self.seq2map(result)
path_correctness = maze.path_correctness(mazes, paths)
target_paths=paths,
predicted_paths=predicted_paths,
path_correct=maze.path_correctness(mazes, predicted_paths),
+ path_optimal=maze.path_optimality(paths, predicted_paths),
)
log_string(f"wrote {filename}")
assert n < nmax
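+# A predicted path counts as optimal when it uses exactly as many path cells as
+# the reference path, i.e. it is as short as the known shortest path.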
+def path_optimality(ref_paths, paths):
+ return (ref_paths == v_path).long().flatten(1).sum(1) == (
+ paths == v_path
+ ).long().flatten(1).sum(1)
+
+
def path_correctness(mazes, paths):
- still_ok = (mazes - (paths * (paths < 4))).view(mazes.size(0), -1).abs().sum(1) == 0
+ still_ok = (mazes - (paths * (paths != v_path))).view(mazes.size(0), -1).abs().sum(
+ 1
+ ) == 0
reached = still_ok.new_zeros(still_ok.size())
current, pred_current = paths.clone(), paths.new_zeros(paths.size())
goal = (mazes == v_goal).long()
score_paths=None,
score_truth=None,
path_correct=None,
+ path_optimal=None,
):
colors = torch.tensor(
[
)
imgs = torch.cat((imgs, c_score_paths.unsqueeze(1)), 1)
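+ # background/frame color drawn around each row of mazes, light gray by default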
+ img = torch.tensor([224, 224, 224]).view(1, -1, 1, 1)
+
# NxKxCxHxW
- if path_correct is None:
- path_correct = torch.zeros(imgs.size(0)) <= 1
- path_correct = path_correct.cpu().long().view(-1, 1, 1, 1)
- img = torch.tensor([224, 224, 224]).view(1, -1, 1, 1) * path_correct + torch.tensor(
- [255, 0, 0]
- ).view(1, -1, 1, 1) * (1 - path_correct)
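+ # recolor the frame per sample: green for an optimal predicted path, then red
+ # for an incorrect one (applied last, so it wins), keeping light gray otherwise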
+ if path_optimal is not None:
+ path_optimal = path_optimal.cpu().long().view(-1, 1, 1, 1)
+ img = (
+ img * (1 - path_optimal)
+ + torch.tensor([0, 255, 0]).view(1, -1, 1, 1) * path_optimal
+ )
+
+ if path_correct is not None:
+ path_correct = path_correct.cpu().long().view(-1, 1, 1, 1)
+ img = img * path_correct + torch.tensor([255, 0, 0]).view(1, -1, 1, 1) * (
+ 1 - path_correct
+ )
+
img = img.expand(
-1, -1, imgs.size(3) + 2, 1 + imgs.size(1) * (1 + imgs.size(4))
).clone()
+
for k in range(imgs.size(1)):
img[
:,