3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
10 from torch.nn import functional as F
12 ######################################################################
def __init__(self, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
    """Set up the grid-world configuration and the token-code layout.

    height, width: grid size; T: episode length (time steps);
    nb_walls / nb_coins: how many walls / coins are dropped on the grid.
    """
    # NOTE(review): assignments of self.height / self.width / self.T appear
    # to be missing from this view (other methods read them) — confirm
    # against the full file.
    self.nb_walls = nb_walls
    self.nb_coins = nb_coins

    # Number of distinct codes reserved for each token family.
    self.nb_states_codes = 5
    self.nb_actions_codes = 5
    self.nb_rewards_codes = 3
    self.nb_lookahead_rewards_codes = 4  # stands for -1, 0, +1, and UNKNOWN

    # The code families occupy consecutive, non-overlapping integer ranges.
    self.first_states_code = 0
    self.first_actions_code = self.first_states_code + self.nb_states_codes
    self.first_rewards_code = self.first_actions_code + self.nb_actions_codes
    self.first_lookahead_rewards_code = (
        self.first_rewards_code + self.nb_rewards_codes
    # NOTE(review): the closing ")" and the header of the next assignment
    # (the following line reads like the RHS of a total such as
    # self.nb_codes = (...)) appear to be missing from this view.
        self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes

    # Layout of one "iteration" inside the flat token sequence:
    # lookahead_reward token, height*width state tokens, reward, action.
    self.state_len = self.height * self.width
    self.index_lookahead_reward = 0
    # NOTE(review): self.index_states (presumably = 1) seems to be missing
    # here; seq2episodes reads it — confirm.
    self.index_reward = self.state_len + 1
    self.index_action = self.state_len + 2
    self.it_len = self.state_len + 3  # lookahead_reward / state / reward / action
def state2code(self, r):
    """Shift a raw state value into the state-code range."""
    base = self.first_states_code
    return base + r
def code2state(self, r):
    """Shift a state code back to its raw state value (inverse of state2code)."""
    base = self.first_states_code
    return r - base
def action2code(self, r):
    """Shift a raw action value into the action-code range."""
    base = self.first_actions_code
    return base + r
def code2action(self, r):
    """Shift an action code back to its raw action value (inverse of action2code)."""
    base = self.first_actions_code
    return r - base
def reward2code(self, r):
    """Map a reward in {-1, 0, +1} to its code (shifted by +1 into the range)."""
    base = self.first_rewards_code
    return base + (r + 1)
def code2reward(self, r):
    """Map a reward code back to {-1, 0, +1} (inverse of reward2code)."""
    base = self.first_rewards_code
    return r - base - 1
def lookahead_reward2code(self, r):
    """Map a lookahead reward (-1, 0, +1, or 2 for UNKNOWN) to its code."""
    base = self.first_lookahead_rewards_code
    return base + (r + 1)
def code2lookahead_reward(self, r):
    """Map a lookahead-reward code back to -1, 0, +1, or 2 (UNKNOWN)."""
    base = self.first_lookahead_rewards_code
    return r - base - 1
75 ######################################################################
def generate_episodes(self, nb):
    """Sample nb random episodes; returns (states, agent_actions, rewards).

    NOTE(review): several interior lines appear to be missing from this
    view (e.g. the `wall = ...` accumulator initialisation and the
    `collision = (` / `hit = (` assignment headers); the surviving
    fragments are kept verbatim below.
    """
    rnd = torch.rand(nb, self.height, self.width)
    # Drop nb_walls walls, one per iteration, at the current argmax of rnd.
    for k in range(self.nb_walls):
        # fragment: one-hot mask of the argmax cell, reshaped to the grid —
        # presumably accumulated into `wall` (assignment header missing).
            rnd.flatten(1).argmax(dim=1)[:, None]
            == torch.arange(rnd.flatten(1).size(1))[None, :]
        ).long().reshape(rnd.size())
        # Zero out occupied cells so the next argmax picks a fresh one.
        rnd = rnd * (1 - wall.clamp(max=1))

    rnd = torch.rand(nb, self.height, self.width)
    rnd[:, 0, 0] = 0  # Do not put a coin at the agent's starting cell (0, 0)
    coins = torch.zeros(nb, self.T, self.height, self.width, dtype=torch.int64)
    rnd = rnd * (1 - wall.clamp(max=1))  # never place coins on walls
    # Drop nb_coins coins at t=0, same argmax-of-random scheme as walls.
    for k in range(self.nb_coins):
        coins[:, 0] = coins[:, 0] + (
            rnd.flatten(1).argmax(dim=1)[:, None]
            == torch.arange(rnd.flatten(1).size(1))[None, :]
        ).long().reshape(rnd.size())
        rnd = rnd * (1 - coins[:, 0].clamp(max=1))

    # Walls are static: replicate them over all T time steps.
    states = wall[:, None, :, :].expand(-1, self.T, -1, -1).clone()

    # Agent starts top-left (0, 0); troll starts bottom-right (-1, -1);
    # both draw one of 5 random actions per step (see all_moves below).
    agent = torch.zeros(states.size(), dtype=torch.int64)
    agent[:, 0, 0, 0] = 1
    agent_actions = torch.randint(5, (nb, self.T))
    rewards = torch.zeros(nb, self.T, dtype=torch.int64)

    troll = torch.zeros(states.size(), dtype=torch.int64)
    troll[:, 0, -1, -1] = 1
    troll_actions = torch.randint(5, (nb, self.T))

    # Scratch buffer holding the 5 candidate next-position maps.
    all_moves = agent.new(nb, 5, self.height, self.width)
    for t in range(self.T - 1):
        # NOTE(review): a reset of all_moves (e.g. all_moves.zero_())
        # would normally precede these writes — it may be on a missing line.
        # Candidates: 0 = stay, 1..4 = the agent map shifted in 4 directions.
        all_moves[:, 0] = agent[:, t]
        all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
        all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
        all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
        all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
        # Pick the candidate selected by the sampled action (one-hot).
        a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        # fragment: occupancy after masking out walls and the troll —
        # presumably part of a `collision = (...)` computation (header missing).
            (after_move * (1 - wall) * (1 - troll[:, t]))
            .sum(dim=1)[:, None, None]
        # A blocked move keeps the agent where it was.
        agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

        # Same move machinery for the troll, against walls and the agent's
        # freshly-computed position.
        all_moves[:, 0] = troll[:, t]
        all_moves[:, 1, 1:, :] = troll[:, t, :-1, :]
        all_moves[:, 2, :-1, :] = troll[:, t, 1:, :]
        all_moves[:, 3, :, 1:] = troll[:, t, :, :-1]
        all_moves[:, 4, :, :-1] = troll[:, t, :, 1:]
        a = F.one_hot(troll_actions[:, t], num_classes=5)[:, :, None, None]
        after_move = (all_moves * a).sum(dim=1)
        # fragment: presumably part of the troll's `collision = (...)`.
            (after_move * (1 - wall) * (1 - agent[:, t + 1]))
            .sum(dim=1)[:, None, None]
        troll[:, t + 1] = collision * troll[:, t] + (1 - collision) * after_move

        # fragment: 4-neighbour adjacency tests between agent and troll —
        # presumably summed into `hit` (assignment header missing).
            (agent[:, t + 1, 1:, :] * troll[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
            + (agent[:, t + 1, :-1, :] * troll[:, t + 1, 1:, :])
            + (agent[:, t + 1, :, 1:] * troll[:, t + 1, :, :-1])
            + (agent[:, t + 1, :, :-1] * troll[:, t + 1, :, 1:])
        # Collapse the adjacency count to a 0/1 indicator.
        hit = (hit > 0).long()
        # assert hit.min() == 0 and hit.max() <= 1

        # A coin is collected when the agent steps on it; collected coins
        # disappear from the next time step's map.
        got_coin = (agent[:, t + 1] * coins[:, t]).flatten(1).sum(dim=1)
        coins[:, t + 1] = coins[:, t] * (1 - agent[:, t + 1])

        # Reward: -1 when hit by the troll, otherwise +1 per coin collected.
        rewards[:, t + 1] = -hit + (1 - hit) * got_coin

    # Encode cell contents on top of walls (1): agent=2, troll=3, coin=4 —
    # matching the "_#@T$" glyph table used by seq2str.
    states = states + 2 * agent + 3 * troll + 4 * coins * (1 - troll)

    return states, agent_actions, rewards
175 ######################################################################
def episodes2seq(self, states, actions, rewards):
    """Tokenize episodes into one flat code sequence per episode.

    The lookahead reward at time t summarizes the future rewards[:, t:]:
    their minimum if any future reward is negative, otherwise their maximum.
    """
    neg = rewards.new_zeros(rewards.size())
    pos = rewards.new_zeros(rewards.size())
    for t in range(neg.size(1)):
        neg[:, t] = rewards[:, t:].min(dim=-1).values
        pos[:, t] = rewards[:, t:].max(dim=-1).values
    s = (neg < 0).long() * neg + (neg >= 0).long() * pos

    # NOTE(review): the wrapper around these four encoded components
    # (presumably `return torch.cat([...], dim=2)`, producing the
    # lookahead/state/reward/action layout read back by seq2episodes)
    # appears to be missing from this view.
        self.lookahead_reward2code(s[:, :, None]),
        self.state2code(states.flatten(2)),
        self.reward2code(rewards[:, :, None]),
        self.action2code(actions[:, :, None]),
def seq2episodes(self, seq):
    """Decode a flat code sequence back into
    (lookahead_rewards, states, actions, rewards) — inverse of episodes2seq.
    """
    # One iteration = height*width state tokens plus 3 scalar tokens.
    seq = seq.reshape(seq.size(0), -1, self.height * self.width + 3)
    lookahead_rewards = self.code2lookahead_reward(
        seq[:, :, self.index_lookahead_reward]
    # NOTE(review): the closing ")" appears to be missing from this view.
    states = self.code2state(
        seq[:, :, self.index_states : self.height * self.width + self.index_states]
    # NOTE(review): closing ")" appears missing; also self.index_states is
    # not visible in __init__ in this view — confirm it is defined (= 1?).
    states = states.reshape(states.size(0), states.size(1), self.height, self.width)
    actions = self.code2action(seq[:, :, self.index_action])
    rewards = self.code2reward(seq[:, :, self.index_reward])
    return lookahead_rewards, states, actions, rewards
def seq2str(self, seq):
    """Render each row of token codes as a one-character-per-token string.

    NOTE(review): the inner helper header (presumably `def token2str(t):`,
    given the call on the last line) and the if/elif keywords around the
    range tests appear to be missing from this view; the tests and glyph
    tables are kept verbatim.
    """
    # State codes -> "_#@T$" (empty / wall / agent / troll / coin).
        t >= self.first_states_code
        and t < self.first_states_code + self.nb_states_codes
        return "_#@T$"[t - self.first_states_code]
    # Action codes -> "ISNEW" (presumably Idle + four directions — confirm).
        t >= self.first_actions_code
        and t < self.first_actions_code + self.nb_actions_codes
        return "ISNEW"[t - self.first_actions_code]
    # Reward codes -> "-0+".
        t >= self.first_rewards_code
        and t < self.first_rewards_code + self.nb_rewards_codes
        return "-0+"[t - self.first_rewards_code]
    # Lookahead-reward codes -> "n.pU" (negative / zero / positive / UNKNOWN).
        t >= self.first_lookahead_rewards_code
        < self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
        return "n.pU"[t - self.first_lookahead_rewards_code]
    return ["".join([token2str(x.item()) for x in row]) for row in seq]
236 ######################################################################
# NOTE(review): the enclosing definition header — presumably
# `def episodes2str(self, lookahead_rewards, states, actions, rewards,
#  unicode=..., ansi_colors=...)`, judging from the call sites elsewhere in
# this file — is missing from this view, along with several interior lines.
# The surviving fragments are kept verbatim with review notes.
# vert, hori, cross, thin_hori = "║", "═", "╬", "─"
# Frame glyphs: unicode vs ASCII variants (the selecting if/else between
# these two assignments appears to be missing).
vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

# Horizontal separator spanning all time steps laid out side by side.
hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

for n in range(states.size(0)):
    # fragment: presumably the body of a `def state_symbol(v):` helper
    # indexing a `symbols` glyph table (both missing from this view).
        return "?" if v < 0 or v >= len(symbols) else symbols[v]

    for i in range(states.size(2)):
        # fragment: joins one grid row per time step (enclosing call missing).
            "".join([state_symbol(v) for v in row])
            for row in states[n, :, i]

    # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

    def status_bar(a, r, lr=None):
        # One-line summary under a frame: action glyph, reward glyph, and
        # (optionally) the lookahead-reward glyph.
        a, r = a.item(), r.item()
        sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
        sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
        # NOTE(review): lr defaults to None — this line is presumably
        # guarded by an `if lr is not None:` on a missing line.
        sb_lr = "n pU"[lr + 1] if lr in {-1, 0, 1, 2} else "?"
        # fragment: right-pads the bar to the frame width (context missing).
        + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))

    # fragment: per-episode arguments of a call (presumably to status_bar).
        actions[n], rewards[n], lookahead_rewards[n]

# ANSI-colorize the troll (T, red), agent (@, green) and coin ($, blue).
for u, c in [("T", 31), ("@", 32), ("$", 34)]:
    result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")
319 ######################################################################
def save_seq_as_anim_script(self, seq, filename):
    """Write a shell script that replays the episodes in seq as a terminal
    animation: one `cat << EOF` frame per time step, 0.25 s apart.
    """
    it_len = self.height * self.width + 3

    # fragment: reorders seq so that time leads the batch dimension —
    # the surrounding expression lines appear missing from this view.
        seq.reshape(seq.size(0), -1, it_len)
        .reshape(self.T, seq.size(0), -1)

    with open(filename, "w") as f:
        for t in range(self.T):
            f.write("cat << EOF\n")
            # for i in range(seq.size(2)):
            # lr, s, a, r = seq2episodes(seq[t : t + 1, :, i], self.height, self.width)
            # NOTE(review): reshape(8, -1) hard-codes 8 episodes per frame —
            # confirm seq always carries a multiple of 8 episodes.
            lr, s, a, r = self.seq2episodes(seq[t : t + 1, :].reshape(8, -1))
            f.write(self.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
            # NOTE(review): a matching f.write("EOF\n") terminator for the
            # heredoc seems to be missing from this view.
            f.write("sleep 0.25\n")
    # NOTE(review): this f-string has no placeholder — "(unknown)" looks
    # like a mangled `{filename}`; confirm against the full file.
    print(f"Saved (unknown)")
if __name__ == "__main__":
    # Smoke test: generate a few short episodes, round-trip them through the
    # tokenizer, and print the decoded rendering.
    gw = GreedWorld(height=5, width=7, T=10, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=6)
    seq = gw.episodes2seq(states, actions, rewards)
    lr, s, a, r = gw.seq2episodes(seq)
    print(gw.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))

    for s in gw.seq2str(seq):
    # NOTE(review): the loop body (presumably `print(s)`) appears to be
    # missing from this view.

    # Larger run: longer episodes, bigger batch, dumped as an animation script.
    gw = GreedWorld(height=5, width=7, T=100, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=128)
    seq = gw.episodes2seq(states, actions, rewards)
    gw.save_seq_as_anim_script(seq, "anim.sh")