######################################################################
-def compute_distance(walls, i, j):
+def compute_distance(walls, goal_i, goal_j):
max_length = walls.numel()
dist = torch.full_like(walls, max_length)
- dist[i, j] = 0
+ dist[goal_i, goal_j] = 0
pred_dist = torch.empty_like(dist)
while True:
######################################################################
-def compute_policy(walls, i, j):
- distance = compute_distance(walls, i, j)
+def compute_policy(walls, goal_i, goal_j):
+ distance = compute_distance(walls, goal_i, goal_j)
distance = distance + walls.numel() * walls
value = distance.new_full((4,) + distance.size(), walls.numel())
return proba
+def stationary_densities(mazes, policies):
+    start = (mazes == v_start).nonzero(as_tuple=True)
+    probas = mazes.new_zeros(mazes.size())
+    pred_probas = probas.clone()
+    probas[start] = 1.0
+
+    while not pred_probas.equal(probas):
+        pred_probas.copy_(probas)
+        probas.zero_()
+        probas[:, 1:, :] += pred_probas[:, :-1, :] * policies[:, 0, :-1, :]
+        probas[:, :-1, :] += pred_probas[:, 1:, :] * policies[:, 1, 1:, :]
+        probas[:, :, 1:] += pred_probas[:, :, :-1] * policies[:, 2, :, :-1]
+        probas[:, :, :-1] += pred_probas[:, :, 1:] * policies[:, 3, :, 1:]
+        probas[start] = 1.0
+
+    return probas
+
+
######################################################################
[255, 255, 255], # empty
[0, 0, 0], # wall
[0, 255, 0], # start
- [0, 0, 255], # goal
+ [127, 127, 255], # goal
[255, 0, 0], # path
]
)
c_score_paths = score_paths.unsqueeze(1).expand(-1, 3, -1, -1)
c_score_paths = (
c_score_paths * colors[4].reshape(1, 3, 1, 1)
- + (1 - c_score_paths) * colors[3].reshape(1, 3, 1, 1)
+ + (1 - c_score_paths) * colors[0].reshape(1, 3, 1, 1)
).long()
c_score_paths = c_score_paths * (mazes.unsqueeze(1) == v_empty) + c_mazes * (
mazes.unsqueeze(1) != v_empty