# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import torch

from torch.nn import functional as F

######################################################################

# number of codes per token family; the first three counts are inferred
# from the symbol tables in token2str() below
nb_state_codes = 4
nb_actions_codes = 5
nb_rewards_codes = 3
nb_lookahead_rewards_codes = 3

first_state_code = 0
first_actions_code = first_state_code + nb_state_codes
first_rewards_code = first_actions_code + nb_actions_codes
first_lookahead_rewards_code = first_rewards_code + nb_rewards_codes
nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
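
# For reference, the resulting vocabulary layout (a summary added here,
# derived from the offsets above and from token2str() below):
#
#   codes  0..3    cell states        " #@$"  (empty, wall, agent, monster)
#   codes  4..8    actions            "ISNEW" (plausibly idle/south/north/east/west)
#   codes  9..11   rewards            "-0+"   (-1, 0, +1)
#   codes 12..14   lookahead rewards  "n.p"   (-1, 0, +1)
#
# i.e. nb_codes = 15.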

######################################################################


def generate_episodes(nb, height=6, width=6, T=10):
    rnd = torch.rand(nb, height, width)

    # keep the border wall-free so the start corners stay open (inferred
    # guard; the agent and monster start in opposite corners)
    rnd[:, 0, :] = 0
    rnd[:, -1, :] = 0
    rnd[:, :, 0] = 0
    rnd[:, :, -1] = 0

    # drop a wall at the argmax of the remaining noise, a few times over
    # (three walls here; the exact count in the original is elided)
    wall = 0
    for k in range(3):
        wall = wall + (
            rnd.flatten(1).argmax(dim=1)[:, None]
            == torch.arange(rnd.flatten(1).size(1))[None, :]
        ).long().reshape(rnd.size())
        rnd = rnd * (1 - wall.clamp(max=1))

    states = wall[:, None, :, :].expand(-1, T, -1, -1).clone()

    agent = torch.zeros(states.size(), dtype=torch.int64)
    # the agent starts in the top-left corner (inferred; the monster
    # starts in the bottom-right one)
    agent[:, 0, 0, 0] = 1
    agent_actions = torch.randint(5, (nb, T))
    rewards = torch.zeros(nb, T, dtype=torch.int64)

    monster = torch.zeros(states.size(), dtype=torch.int64)
    monster[:, 0, -1, -1] = 1
    monster_actions = torch.randint(5, (nb, T))

    all_moves = agent.new(nb, 5, height, width)

    for t in range(T - 1):
        # the five candidate positions: stay, and the four axis moves
        all_moves.zero_()
        all_moves[:, 0] = agent[:, t]
        all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
        all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
        all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
        all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
        a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        # the move is cancelled if it lands on a wall, on the monster, or
        # off the grid (the shifted position then sums to zero)
        collision = (
            (after_move * (1 - wall) * (1 - monster[:, t]))
            .flatten(1)
            .sum(dim=1)[:, None, None]
            == 0
        ).long()
        agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

        # the monster moves the same way, against the agent's new position
        all_moves.zero_()
        all_moves[:, 0] = monster[:, t]
        all_moves[:, 1, 1:, :] = monster[:, t, :-1, :]
        all_moves[:, 2, :-1, :] = monster[:, t, 1:, :]
        all_moves[:, 3, :, 1:] = monster[:, t, :, :-1]
        all_moves[:, 4, :, :-1] = monster[:, t, :, 1:]
        a = F.one_hot(monster_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        collision = (
            (after_move * (1 - wall) * (1 - agent[:, t + 1]))
            .flatten(1)
            .sum(dim=1)[:, None, None]
            == 0
        ).long()
        monster[:, t + 1] = collision * monster[:, t] + (1 - collision) * after_move

        # the agent is hit when 4-adjacent to the monster
        hit = (
            (agent[:, t + 1, 1:, :] * monster[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
            + (agent[:, t + 1, :-1, :] * monster[:, t + 1, 1:, :]).flatten(1).sum(dim=1)
            + (agent[:, t + 1, :, 1:] * monster[:, t + 1, :, :-1]).flatten(1).sum(dim=1)
            + (agent[:, t + 1, :, :-1] * monster[:, t + 1, :, 1:]).flatten(1).sum(dim=1)
        )
        hit = (hit > 0).long()

        assert hit.min() == 0 and hit.max() <= 1

        # -1 when hit, +1 when the agent reaches the bottom-right corner
        rewards[:, t + 1] = -hit + (1 - hit) * agent[:, t + 1, -1, -1]

    # cell codes: 0 empty, 1 wall, 2 agent, 3 monster
    states += 2 * agent + 3 * monster

    return states, agent_actions, rewards
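
# Illustrative shapes, assuming the defaults above (a sketch, not part
# of the original file):
#
#   s, a, r = generate_episodes(nb=2, height=6, width=6, T=10)
#   # s: (2, 10, 6, 6) int64 grids, a: (2, 10) actions in {0..4},
#   # r: (2, 10) rewards in {-1, 0, +1}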

######################################################################


def episodes2seq(states, actions, rewards, lookahead_delta=None):
    # flatten each episode into one token sequence, one family at a time
    states = states.flatten(2) + first_state_code
    actions = actions[:, :, None] + first_actions_code

    if lookahead_delta is not None:
        # vectorized alternative, kept for reference:
        # r = rewards
        # u = F.pad(r, (0, lookahead_delta - 1)).as_strided(
        #     (r.size(0), r.size(1), lookahead_delta),
        #     (r.size(1) + lookahead_delta - 1, 1, 1),
        # )
        # a = u[:, :, 1:].min(dim=-1).values
        # b = u[:, :, 1:].max(dim=-1).values
        # s = (a < 0).long() * a + (a >= 0).long() * b
        # lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code

        # a[n, t] = min_{s > t} r[n, s] and b[n, t] = max_{s > t} r[n, s]
        a = rewards.new_zeros(rewards.size())
        b = rewards.new_zeros(rewards.size())
        for t in range(a.size(1) - 1):
            a[:, t] = rewards[:, t + 1 :].min(dim=-1).values
            b[:, t] = rewards[:, t + 1 :].max(dim=-1).values
        # a negative future reward dominates, otherwise take the max
        s = (a < 0).long() * a + (a >= 0).long() * b
        lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code

    # shift rewards from {-1, 0, +1} into their code range
    r = rewards[:, :, None]
    rewards = (r + 1) + first_rewards_code

    assert (
        states.min() >= first_state_code
        and states.max() < first_state_code + nb_state_codes
    )
    assert (
        actions.min() >= first_actions_code
        and actions.max() < first_actions_code + nb_actions_codes
    )
    assert (
        rewards.min() >= first_rewards_code
        and rewards.max() < first_rewards_code + nb_rewards_codes
    )

    if lookahead_delta is None:
        return torch.cat([states, actions, rewards], dim=2).flatten(1)
    else:
        assert (
            lookahead_rewards.min() >= first_lookahead_rewards_code
            and lookahead_rewards.max()
            < first_lookahead_rewards_code + nb_lookahead_rewards_codes
        )
        return torch.cat(
            [states, actions, rewards, lookahead_rewards], dim=2
        ).flatten(1)
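
# Layout note: each time step contributes height*width state tokens, one
# action token, one reward token and, when lookahead_delta is set, one
# lookahead-reward token, so the flattened sequences have length
# T * (height*width + 2) or T * (height*width + 3). seq2episodes() below
# inverts this encoding.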


def seq2episodes(seq, height, width, lookahead=False):
    seq = seq.reshape(seq.size(0), -1, height * width + (3 if lookahead else 2))
    states = seq[:, :, : height * width] - first_state_code
    states = states.reshape(states.size(0), states.size(1), height, width)
    actions = seq[:, :, height * width] - first_actions_code
    rewards = seq[:, :, height * width + 1] - first_rewards_code - 1

    if lookahead:
        lookahead_rewards = (
            seq[:, :, height * width + 2] - first_lookahead_rewards_code - 1
        )
        return states, actions, rewards, lookahead_rewards
    else:
        return states, actions, rewards
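
# Round-trip sketch (illustrative, not part of the original file):
#
#   seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
#   s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
#   # s, a, r equal the inputs; lr[:, t] is -1 if any reward after time t
#   # is negative, and otherwise the max reward occurring after t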


def seq2str(seq):
    def token2str(t):
        if t >= first_state_code and t < first_state_code + nb_state_codes:
            return " #@$"[t - first_state_code]
        elif t >= first_actions_code and t < first_actions_code + nb_actions_codes:
            return "ISNEW"[t - first_actions_code]
        elif t >= first_rewards_code and t < first_rewards_code + nb_rewards_codes:
            return "-0+"[t - first_rewards_code]
        elif (
            t >= first_lookahead_rewards_code
            and t < first_lookahead_rewards_code + nb_lookahead_rewards_codes
        ):
            return "n.p"[t - first_lookahead_rewards_code]
        else:
            return "?"

    return ["".join([token2str(x.item()) for x in row]) for row in seq]
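
# Illustrative use, mirroring the commented-out lines in __main__ below:
#
#   for line in seq2str(seq[:2]):
#       print(line)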

######################################################################


def episodes2str(
    states, actions, rewards, lookahead_rewards=None, unicode=False, ansi_colors=False
):
    if unicode:
        symbols = "·█@$"
        # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
        vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
    else:
        symbols = " #@$"
        vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

    hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

    result = hline

    for n in range(states.size(0)):

        def state_symbol(v):
            v = v.item()
            return "?" if v < 0 or v >= len(symbols) else symbols[v]

        # one line of text per grid row, all T time steps side by side
        for i in range(states.size(2)):
            result += (
                vert
                + vert.join(
                    ["".join([state_symbol(v) for v in row]) for row in states[n, :, i]]
                )
                + vert
                + "\n"
            )

        # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

        def status_bar(a, r, lr=None):
            a, r = a.item(), r.item()
            sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
            sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
            if lr is None:
                sb_lr = ""
            else:
                lr = lr.item()
                sb_lr = "n p"[lr + 1] if lr in {-1, 0, 1} else "?"
            return (
                sb_a
                + "/"
                + sb_r
                + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
                + sb_lr
            )

        if lookahead_rewards is None:
            result += (
                vert
                + vert.join([status_bar(a, r) for a, r in zip(actions[n], rewards[n])])
                + vert
                + "\n"
            )
        else:
            result += (
                vert
                + vert.join(
                    [
                        status_bar(a, r, lr)
                        for a, r, lr in zip(
                            actions[n], rewards[n], lookahead_rewards[n]
                        )
                    ]
                )
                + vert
                + "\n"
            )

        result += hline

    if ansi_colors:
        # color the monster ($) red and the agent (@) green
        for u, c in [("$", 31), ("@", 32)]:
            result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")

    return result

######################################################################

if __name__ == "__main__":
    nb, height, width, T = 25, 5, 7, 25
    states, actions, rewards = generate_episodes(nb, height, width, T)
    seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
    s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
    print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))
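
    # each episode prints as its T grids side by side, with one
    # action/reward status cell under each time step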

    # for s in seq2str(seq):
    #     print(s)