#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import torch
from torch.nn import functional as F

######################################################################


class GreedWorld:
    def __init__(self, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
        self.height = height
        self.width = width
        self.T = T
        self.nb_walls = nb_walls
        self.nb_coins = nb_coins
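
        # Number of codes in each token family: grid-cell states
        # (empty / wall / agent / troll / coin), actions, rewards,
        # and lookahead rewards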
        self.nb_states_codes = 5
        self.nb_actions_codes = 5
        self.nb_rewards_codes = 3
        self.nb_lookahead_rewards_codes = 4  # stands for -1, 0, +1, and UNKNOWN
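
        # The four families are packed back to back in a single code
        # space, so every token is one integer in [0, nb_codes)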
        self.first_states_code = 0
        self.first_actions_code = self.first_states_code + self.nb_states_codes
        self.first_rewards_code = self.first_actions_code + self.nb_actions_codes
        self.first_lookahead_rewards_code = (
            self.first_rewards_code + self.nb_rewards_codes
        )
        self.nb_codes = (
            self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
        )
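
        # Positions of the fields inside one iteration of the sequence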
        self.state_len = self.height * self.width
        self.index_lookahead_reward = 0
        self.index_states = 1
        self.index_action = self.state_len + 1
        self.index_reward = self.state_len + 2
        self.it_len = self.state_len + 3  # lookahead_reward / state / action / reward

    def state2code(self, r):
        return r + self.first_states_code

    def code2state(self, r):
        return r - self.first_states_code

    def action2code(self, r):
        return r + self.first_actions_code

    def code2action(self, r):
        return r - self.first_actions_code

    def reward2code(self, r):
        return r + 1 + self.first_rewards_code

    def code2reward(self, r):
        return r - self.first_rewards_code - 1

    def lookahead_reward2code(self, r):
        # -1, 0, +1, or 2 for UNKNOWN
        return r + 1 + self.first_lookahead_rewards_code

    def code2lookahead_reward(self, r):
        return r - self.first_lookahead_rewards_code - 1

    ######################################################################
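
    # Generate nb episodes of T time steps each: the agent starts in the
    # top-left corner, the troll in the bottom-right one, both move at
    # random, and the reward is +1 per coin collected and -1 whenever the
    # troll ends up in a neighboring cell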
    def generate_episodes(self, nb):
        # Walls: repeatedly take the argmax of a noise map, zeroed on the
        # border and on the cells already used
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, :] = 0
        rnd[:, -1, :] = 0
        rnd[:, :, 0] = 0
        rnd[:, :, -1] = 0
        wall = 0

        for k in range(self.nb_walls):
            wall = wall + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - wall.clamp(max=1))

        # Coins: same trick, avoiding the walls and the agent's start cell
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, 0] = 0  # Do not put a coin at the agent's starting position
        coins = torch.zeros(nb, self.T, self.height, self.width, dtype=torch.int64)
        rnd = rnd * (1 - wall.clamp(max=1))
        for k in range(self.nb_coins):
            coins[:, 0] = coins[:, 0] + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - coins[:, 0].clamp(max=1))

        states = wall[:, None, :, :].expand(-1, self.T, -1, -1).clone()

        agent = torch.zeros(states.size(), dtype=torch.int64)
        agent[:, 0, 0, 0] = 1
        agent_actions = torch.randint(5, (nb, self.T))
        rewards = torch.zeros(nb, self.T, dtype=torch.int64)

        troll = torch.zeros(states.size(), dtype=torch.int64)
        troll[:, 0, -1, -1] = 1
        troll_actions = torch.randint(5, (nb, self.T))

        all_moves = agent.new(nb, 5, self.height, self.width)
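
        # all_moves[:, a] is the occupancy map after action a, with
        # a in 0..4 = Idle / South / North / East / West ("ISNEW")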
        for t in range(self.T - 1):
            all_moves.zero_()
            all_moves[:, 0] = agent[:, t]
            all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
            a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            # The move is cancelled if it lands on a wall or on the troll,
            # or would leave the grid
            collision = (
                (after_move * (1 - wall) * (1 - troll[:, t]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

            # Same update for the troll, which cannot step onto the agent
            all_moves.zero_()
            all_moves[:, 0] = troll[:, t]
            all_moves[:, 1, 1:, :] = troll[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = troll[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = troll[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = troll[:, t, :, 1:]
            a = F.one_hot(troll_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            collision = (
                (after_move * (1 - wall) * (1 - agent[:, t + 1]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            troll[:, t + 1] = collision * troll[:, t] + (1 - collision) * after_move

            # The agent is hit when the troll ends up in one of its four
            # neighboring cells
            hit = (
                (agent[:, t + 1, 1:, :] * troll[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
                + (agent[:, t + 1, :-1, :] * troll[:, t + 1, 1:, :])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, 1:] * troll[:, t + 1, :, :-1])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, :-1] * troll[:, t + 1, :, 1:])
                .flatten(1)
                .sum(dim=1)
            )
            hit = (hit > 0).long()
            # assert hit.min() == 0 and hit.max() <= 1

            got_coin = (agent[:, t + 1] * coins[:, t]).flatten(1).sum(dim=1)
            coins[:, t + 1] = coins[:, t] * (1 - agent[:, t + 1])

            rewards[:, t + 1] = -hit + (1 - hit) * got_coin

        # Cell codes: 0 empty, 1 wall, 2 agent, 3 troll, 4 coin
        states = states + 2 * agent + 3 * troll + 4 * coins * (1 - troll)

        return states, agent_actions, rewards

    ######################################################################
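
    # An episode is encoded as T iterations of it_len tokens each:
    # lookahead_reward / state (height x width tokens) / action / reward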
    def episodes2seq(self, states, actions, rewards):
        # The lookahead reward at time t summarizes the rewards from t
        # onward: the worst one if a negative reward is coming, otherwise
        # the best one
        neg = rewards.new_zeros(rewards.size())
        pos = rewards.new_zeros(rewards.size())
        for t in range(neg.size(1) - 1):
            neg[:, t] = rewards[:, t:].min(dim=-1).values
            pos[:, t] = rewards[:, t:].max(dim=-1).values
        s = (neg < 0).long() * neg + (neg >= 0).long() * pos

        return torch.cat(
            [
                self.lookahead_reward2code(s[:, :, None]),
                self.state2code(states.flatten(2)),
                self.action2code(actions[:, :, None]),
                self.reward2code(rewards[:, :, None]),
            ],
            dim=2,
        ).flatten(1)

    def seq2episodes(self, seq):
        seq = seq.reshape(seq.size(0), -1, self.it_len)
        lookahead_rewards = self.code2lookahead_reward(
            seq[:, :, self.index_lookahead_reward]
        )
        states = self.code2state(
            seq[:, :, self.index_states : self.index_states + self.state_len]
        )
        states = states.reshape(states.size(0), states.size(1), self.height, self.width)
        actions = self.code2action(seq[:, :, self.index_action])
        rewards = self.code2reward(seq[:, :, self.index_reward])
        return lookahead_rewards, states, actions, rewards

    def seq2str(self, seq):
        def token2str(t):
            if (
                t >= self.first_states_code
                and t < self.first_states_code + self.nb_states_codes
            ):
                return "_#@T$"[t - self.first_states_code]
            elif (
                t >= self.first_actions_code
                and t < self.first_actions_code + self.nb_actions_codes
            ):
                return "ISNEW"[t - self.first_actions_code]
            elif (
                t >= self.first_rewards_code
                and t < self.first_rewards_code + self.nb_rewards_codes
            ):
                return "-0+"[t - self.first_rewards_code]
            elif (
                t >= self.first_lookahead_rewards_code
                and t
                < self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
            ):
                return "n.pU"[t - self.first_lookahead_rewards_code]
            else:
                return "?"

        return ["".join([token2str(x.item()) for x in row]) for row in seq]

    ######################################################################
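
    # Pretty-print episodes as text grids, one grid per time step laid out
    # side by side, with a status bar "action/reward ... lookahead" under
    # each episode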
    def episodes2str(
        self,
        lookahead_rewards,
        states,
        actions,
        rewards,
        unicode=False,
        ansi_colors=False,
    ):
        if unicode:
            symbols = "·█@T$"
            # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
            vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
        else:
            symbols = " #@T$"
            vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

        hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

        result = hline

        for n in range(states.size(0)):

            def state_symbol(v):
                v = v.item()
                return "?" if v < 0 or v >= len(symbols) else symbols[v]

            for i in range(states.size(2)):
                result += (
                    vert
                    + vert.join(
                        [
                            "".join([state_symbol(v) for v in row])
                            for row in states[n, :, i]
                        ]
                    )
                    + vert
                    + "\n"
                )

            # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

            def status_bar(a, r, lr=None):
                a, r = a.item(), r.item()
                sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
                sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
                if lr is None:
                    sb_lr = ""
                else:
                    lr = lr.item()
                    sb_lr = "n pU"[lr + 1] if lr in {-1, 0, 1, 2} else "?"
                return (
                    sb_a
                    + "/"
                    + sb_r
                    + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
                    + sb_lr
                )

            result += (
                vert
                + vert.join(
                    [
                        status_bar(a, r, lr)
                        for a, r, lr in zip(
                            actions[n], rewards[n], lookahead_rewards[n]
                        )
                    ]
                )
                + vert
                + "\n"
            )

            result += hline

        if ansi_colors:
            for u, c in [("T", 31), ("@", 32), ("$", 34)]:
                result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")

        return result

    ######################################################################
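
    # Write a shell script that replays the episodes in the terminal as a
    # crude animation, one frame per time step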
    def save_seq_as_anim_script(self, seq, filename):
        it_len = self.height * self.width + 3

        seq = (
            seq.reshape(seq.size(0), -1, it_len)
            .permute(1, 0, 2)
            .reshape(self.T, seq.size(0), -1)
        )

        with open(filename, "w") as f:
            for t in range(self.T):
                f.write("clear\n")
                f.write("cat << EOF\n")
                # Lay the episodes out on 8 rows for display
                lr, s, a, r = self.seq2episodes(seq[t : t + 1, :].reshape(8, -1))
                f.write(self.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
                f.write("EOF\n")
                f.write("sleep 0.25\n")
        print(f"Saved {filename}")


######################################################################

if __name__ == "__main__":
    gw = GreedWorld(height=5, width=7, T=10, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=6)
    seq = gw.episodes2seq(states, actions, rewards)
    lr, s, a, r = gw.seq2episodes(seq)
    print(gw.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))

    for s in gw.seq2str(seq):
        print(s)

    gw = GreedWorld(height=5, width=7, T=100, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=128)
    seq = gw.episodes2seq(states, actions, rewards)
    gw.save_seq_as_anim_script(seq, "anim.sh")