3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
10 from torch.nn import functional as F
12 ######################################################################
# Number of distinct lookahead-reward token codes (presumably rewards
# -1/0/+1, matching the +1 shift in lookahead_reward2code).
nb_lookahead_rewards_codes = 3
# The token vocabulary is laid out as four consecutive, non-overlapping
# ranges: [states][actions][rewards][lookahead rewards].
# NOTE(review): first_state_code, nb_state_codes, nb_actions_codes and
# nb_rewards_codes are defined on lines not visible in this copy.
first_actions_code = first_state_code + nb_state_codes
first_rewards_code = first_actions_code + nb_actions_codes
first_lookahead_rewards_code = first_rewards_code + nb_rewards_codes
# Total vocabulary size (one past the last lookahead-reward code).
nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
25 ######################################################################
29 return first_actions_code + r
33 return r - first_actions_code
37 return first_rewards_code + r + 1
41 return r - first_rewards_code - 1
def lookahead_reward2code(r):
    """Encode a lookahead reward as a token code.

    The reward (expected in {-1, 0, 1} given nb_lookahead_rewards_codes)
    is shifted by one so the three values land on consecutive codes
    starting at first_lookahead_rewards_code.
    """
    shifted = r + 1
    return first_lookahead_rewards_code + shifted
def code2lookahead_reward(r):
    """Decode a lookahead-reward token code back into the reward value.

    Inverse of lookahead_reward2code.
    """
    offset = r - first_lookahead_rewards_code
    return offset - 1
52 ######################################################################
def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3):
    """Generate `nb` random grid-world episodes of `T` time steps each.

    Returns (states, agent_actions, rewards): states is
    (nb, T, height, width) with per-cell codes (walls, agent, monster
    painted in at the bottom of the function), agent_actions is (nb, T)
    with values in {0..4}, rewards is (nb, T) with values in {-1, 0, 1}.

    NOTE(review): several lines of this function appear to be missing
    from this copy of the file; the gaps are flagged inline and the code
    as shown is not syntactically complete.
    """
    # Random scores used to pick wall cells one at a time.
    rnd = torch.rand(nb, height, width)

    # NOTE(review): lines are missing here (inner 57-62) — presumably the
    # border rows/columns of rnd are zeroed so walls stay off the border.
    for k in range(nb_walls):
        # NOTE(review): the opening of this assignment (e.g. "wall = (") is
        # missing. The expression one-hot selects the argmax cell of rnd;
        # the clamp(max=1) below suggests wall may accumulate across
        # iterations — verify against the original.
            rnd.flatten(1).argmax(dim=1)[:, None]
            == torch.arange(rnd.flatten(1).size(1))[None, :]
        ).long().reshape(rnd.size())

        # Zero the chosen cell(s) so the next iteration picks elsewhere.
        rnd = rnd * (1 - wall.clamp(max=1))

    # One map per time step, initialized with the walls.
    states = wall[:, None, :, :].expand(-1, T, -1, -1).clone()

    agent = torch.zeros(states.size(), dtype=torch.int64)
    # NOTE(review): the line placing the agent at t=0 appears to be missing
    # (inner line 74) — presumably agent[:, 0, ...] = 1.
    agent_actions = torch.randint(5, (nb, T))
    rewards = torch.zeros(nb, T, dtype=torch.int64)

    monster = torch.zeros(states.size(), dtype=torch.int64)
    monster[:, 0, -1, -1] = 1  # monster starts in the bottom-right corner
    monster_actions = torch.randint(5, (nb, T))

    # Scratch buffer with the 5 move candidates (stay + 4 shifted copies).
    all_moves = agent.new(nb, 5, height, width)
    for t in range(T - 1):
        # --- agent move ---
        # NOTE(review): a reset of all_moves (e.g. all_moves.zero_()) seems
        # to be missing here; without it the shifted writes below would keep
        # stale border values.
        all_moves[:, 0] = agent[:, t]                   # action 0: stay put
        all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]    # shift +1 row
        all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]    # shift -1 row
        all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]    # shift +1 column
        all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]    # shift -1 column
        # Select the candidate matching the sampled action.
        a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        # NOTE(review): the "collision = (" opening and the expression tail
        # are missing; the fragment masks out walls and the monster before
        # reducing to a per-sample indicator.
        (after_move * (1 - wall) * (1 - monster[:, t]))
        .sum(dim=1)[:, None, None]
        # Blocked moves keep the old position, otherwise take the new one.
        agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

        # --- monster move (same scheme; the agent's NEW position blocks) ---
        all_moves[:, 0] = monster[:, t]
        all_moves[:, 1, 1:, :] = monster[:, t, :-1, :]
        all_moves[:, 2, :-1, :] = monster[:, t, 1:, :]
        all_moves[:, 3, :, 1:] = monster[:, t, :, :-1]
        all_moves[:, 4, :, :-1] = monster[:, t, :, 1:]
        a = F.one_hot(monster_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        # NOTE(review): same missing "collision = (" wrapper as above.
        (after_move * (1 - wall) * (1 - agent[:, t + 1]))
        .sum(dim=1)[:, None, None]
        monster[:, t + 1] = collision * monster[:, t] + (1 - collision) * after_move

        # Agent and monster in 4-adjacent cells counts as a hit.
        # NOTE(review): the "hit = (" opening line appears to be missing.
        (agent[:, t + 1, 1:, :] * monster[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
        + (agent[:, t + 1, :-1, :] * monster[:, t + 1, 1:, :]).flatten(1).sum(dim=1)
        + (agent[:, t + 1, :, 1:] * monster[:, t + 1, :, :-1]).flatten(1).sum(dim=1)
        + (agent[:, t + 1, :, :-1] * monster[:, t + 1, :, 1:]).flatten(1).sum(dim=1)
        hit = (hit > 0).long()

        # assert hit.min() == 0 and hit.max() <= 1

        # Reward: -1 on a hit, +1 if the agent sits on the bottom-right
        # corner, 0 otherwise.
        rewards[:, t + 1] = -hit + (1 - hit) * agent[:, t + 1, -1, -1]

    # Paint agent and monster onto the wall maps: walls stay 1, the agent
    # cell becomes 2, the monster cell 3 (matching token2str's " #@$").
    states += 2 * agent + 3 * monster

    return states, agent_actions, rewards
133 ######################################################################
def episodes2seq(states, actions, rewards, lookahead_delta=None):
    """Flatten episodes into one token sequence per sample.

    Each time step is encoded as height*width state tokens, one action
    token and one reward token (plus one lookahead-reward token when
    lookahead_delta is not None).

    NOTE(review): a few lines appear to be missing from this copy of the
    file (the "assert (" wrappers of the sanity checks and the closing
    ".flatten(1)" of the final return).
    """
    states = states.flatten(2) + first_state_code
    actions = actions[:, :, None] + first_actions_code

    if lookahead_delta is not None:
        # For each step t, summarize the rewards strictly after t: report
        # the minimum if it is negative, otherwise the maximum (a pending
        # negative reward dominates).
        # NOTE(review): lookahead_delta only gates this branch — the
        # min/max below span the whole remaining episode, not a window of
        # size lookahead_delta; possibly a missing-line artifact, confirm
        # against the original.
        a = rewards.new_zeros(rewards.size())
        b = rewards.new_zeros(rewards.size())
        for t in range(a.size(1) - 1):
            a[:, t] = rewards[:, t + 1 :].min(dim=-1).values
            b[:, t] = rewards[:, t + 1 :].max(dim=-1).values
        s = (a < 0).long() * a + (a >= 0).long() * b
        # Shift {-1,0,1} to {0,1,2} before offsetting into the code range.
        lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code

    r = rewards[:, :, None]
    rewards = (r + 1) + first_rewards_code

    # NOTE(review): these look like the bodies of assert statements whose
    # "assert (" openings were lost in this copy.
    # states.min() >= first_state_code
    # and states.max() < first_state_code + nb_state_codes
    # actions.min() >= first_actions_code
    # and actions.max() < first_actions_code + nb_actions_codes
    # rewards.min() >= first_rewards_code
    # and rewards.max() < first_rewards_code + nb_rewards_codes

    if lookahead_delta is None:
        return torch.cat([states, actions, rewards], dim=2).flatten(1)

    # lookahead_rewards.min() >= first_lookahead_rewards_code
    # and lookahead_rewards.max()
    # < first_lookahead_rewards_code + nb_lookahead_rewards_codes

    # NOTE(review): the closing "1)" of this call is missing in this copy.
    return torch.cat([states, actions, rewards, lookahead_rewards], dim=2).flatten(
def seq2episodes(seq, height, width, lookahead=False):
    """Inverse of episodes2seq: decode token sequences back into episodes.

    NOTE(review): the `if lookahead:`/`else:` structure that must surround
    the two return statements, and the closing parenthesis of the
    lookahead_rewards expression, are missing from this copy of the file.
    """
    # Each step occupies height*width state tokens + action + reward
    # (+ one lookahead-reward token when lookahead is True).
    seq = seq.reshape(seq.size(0), -1, height * width + (3 if lookahead else 2))
    states = seq[:, :, : height * width] - first_state_code
    states = states.reshape(states.size(0), states.size(1), height, width)
    actions = seq[:, :, height * width] - first_actions_code
    # Undo the +1 shift applied at encoding time.
    rewards = seq[:, :, height * width + 1] - first_rewards_code - 1
    lookahead_rewards = (
        seq[:, :, height * width + 2] - first_lookahead_rewards_code - 1
    return states, actions, rewards, lookahead_rewards
    return states, actions, rewards
196 if t >= first_state_code and t < first_state_code + nb_state_codes:
197 return " #@$"[t - first_state_code]
198 elif t >= first_actions_code and t < first_actions_code + nb_actions_codes:
199 return "ISNEW"[t - first_actions_code]
200 elif t >= first_rewards_code and t < first_rewards_code + nb_rewards_codes:
201 return "-0+"[t - first_rewards_code]
203 t >= first_lookahead_rewards_code
204 and t < first_lookahead_rewards_code + nb_lookahead_rewards_codes
206 return "n.p"[t - first_lookahead_rewards_code]
210 return ["".join([token2str(x.item()) for x in row]) for row in seq]
213 ######################################################################
217 states, actions, rewards, lookahead_rewards=None, unicode=False, ansi_colors=False
221 # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
222 vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
225 vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"
227 hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"
231 for n in range(states.size(0)):
235 return "?" if v < 0 or v >= len(symbols) else symbols[v]
237 for i in range(states.size(2)):
241 ["".join([state_symbol(v) for v in row]) for row in states[n, :, i]]
247 # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"
249 def status_bar(a, r, lr=None):
250 a, r = a.item(), r.item()
251 sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
252 sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
257 sb_lr = "n p"[lr + 1] if lr in {-1, 0, 1} else "?"
262 + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
266 if lookahead_rewards is None:
269 + vert.join([status_bar(a, r) for a, r in zip(actions[n], rewards[n])])
280 actions[n], rewards[n], lookahead_rewards[n]
291 for u, c in [("$", 31), ("@", 32)]:
292 result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")
297 ######################################################################
if __name__ == "__main__":
    # Smoke test: generate random episodes, round-trip them through the
    # token encoding/decoding, and pretty-print the result.
    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
    states, actions, rewards = generate_episodes(nb, height, width, T, nb_walls)
    seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
    s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
    print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))
    # NOTE(review): the body of this commented-out loop (presumably a
    # print) is missing from this copy.
    # for s in seq2str(seq):