#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import torch

from torch.nn import functional as F

######################################################################

REWARD_PLUS = 1
REWARD_NONE = 0
REWARD_MINUS = -1
REWARD_UNKNOWN = 2


class GreedWorld:
    def __init__(self, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
        self.height = height
        self.width = width
        self.T = T
        self.nb_walls = nb_walls
        self.nb_coins = nb_coins

        self.nb_states_codes = 5
        self.nb_actions_codes = 5
        self.nb_rewards_codes = 3
        self.nb_lookahead_rewards_codes = 4  # stands for -1, 0, +1, and UNKNOWN

        self.first_states_code = 0
        self.first_actions_code = self.first_states_code + self.nb_states_codes
        self.first_rewards_code = self.first_actions_code + self.nb_actions_codes
        self.first_lookahead_rewards_code = (
            self.first_rewards_code + self.nb_rewards_codes
        )
        self.nb_codes = (
            self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
        )

        self.state_len = self.height * self.width
        self.index_lookahead_reward = 0
        self.index_states = 1
        self.index_reward = self.state_len + 1
        self.index_action = self.state_len + 2
        self.it_len = self.state_len + 3  # lookahead_reward / state / reward / action
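
        # Token layout of one time step (it_len = height * width + 3 tokens):
        #   [ lookahead_reward | s_0 ... s_{height*width-1} | reward | action ]
        # A full episode is the concatenation of T such blocks.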

    def state2code(self, r):
        return r + self.first_states_code

    def code2state(self, r):
        return r - self.first_states_code

    def action2code(self, r):
        return r + self.first_actions_code

    def code2action(self, r):
        return r - self.first_actions_code

    def reward2code(self, r):
        return r + 1 + self.first_rewards_code

    def code2reward(self, r):
        return r - self.first_rewards_code - 1

    def lookahead_reward2code(self, r):
        # -1, 0, +1 or 2 for UNKNOWN
        return r + 1 + self.first_lookahead_rewards_code

    def code2lookahead_reward(self, r):
        return r - self.first_lookahead_rewards_code - 1

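    # Example with the default layout: state codes are 0-4, action codes
    # 5-9, reward codes 10-12, and lookahead reward codes 13-16, hence
    # reward2code(-1) == 10, code2reward(10) == -1, and
    # lookahead_reward2code(REWARD_UNKNOWN) == 16.
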
    ######################################################################

    def generate_episodes(self, nb):
        # sample the walls: none on the border rows and columns, so the
        # agent's and the troll's starting corners remain free
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, :] = 0
        rnd[:, -1, :] = 0
        rnd[:, :, 0] = 0
        rnd[:, :, -1] = 0
        wall = 0
        for k in range(self.nb_walls):
            # put a wall cell at the argmax of rnd (one-hot over the
            # flattened grid), then zero rnd there so the next wall cell
            # lands elsewhere
            wall = wall + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - wall.clamp(max=1))

        # sample the coins the same way, avoiding the agent's starting
        # position and the walls
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, 0] = 0
        coins = torch.zeros(nb, self.T, self.height, self.width, dtype=torch.int64)
        rnd = rnd * (1 - wall.clamp(max=1))
        for k in range(self.nb_coins):
            coins[:, 0] = coins[:, 0] + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - coins[:, 0].clamp(max=1))

        states = wall[:, None, :, :].expand(-1, self.T, -1, -1).clone()

        agent = torch.zeros(states.size(), dtype=torch.int64)
        agent[:, 0, 0, 0] = 1  # the agent starts in the top-left corner
        agent_actions = torch.randint(5, (nb, self.T))
        rewards = torch.zeros(nb, self.T, dtype=torch.int64)

        troll = torch.zeros(states.size(), dtype=torch.int64)
        troll[:, 0, -1, -1] = 1  # the troll starts in the bottom-right corner
        troll_actions = torch.randint(5, (nb, self.T))

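        # all_moves[:, k] holds, for each sample, the cell the mover would
        # occupy after action k (0: idle, 1: south, 2: north, 3: east,
        # 4: west, matching the "ISNEW" codes used by seq2str)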
        all_moves = agent.new(nb, 5, self.height, self.width)
        for t in range(self.T - 1):
            all_moves.zero_()
            all_moves[:, 0] = agent[:, t]
            all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
            a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            # a move that would end up in a wall, in the troll, or outside
            # the grid leaves the agent where it is
            collision = (
                (after_move * (1 - wall) * (1 - troll[:, t]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

            # the troll moves the same way, but cannot step into the
            # agent's new position
            all_moves.zero_()
            all_moves[:, 0] = troll[:, t]
            all_moves[:, 1, 1:, :] = troll[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = troll[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = troll[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = troll[:, t, :, 1:]
            a = F.one_hot(troll_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            collision = (
                (after_move * (1 - wall) * (1 - agent[:, t + 1]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            troll[:, t + 1] = collision * troll[:, t] + (1 - collision) * after_move

            # the agent is hit when the troll occupies one of the four
            # cells adjacent to it
            hit = (
                (agent[:, t + 1, 1:, :] * troll[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
                + (agent[:, t + 1, :-1, :] * troll[:, t + 1, 1:, :])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, 1:] * troll[:, t + 1, :, :-1])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, :-1] * troll[:, t + 1, :, 1:])
                .flatten(1)
                .sum(dim=1)
            )
            hit = (hit > 0).long()

            # assert hit.min() == 0 and hit.max() <= 1

            got_coin = (agent[:, t + 1] * coins[:, t]).flatten(1).sum(dim=1)
            coins[:, t + 1] = coins[:, t] * (1 - agent[:, t + 1])

            # reward -1 when hit by the troll, +1 when a coin is collected
            rewards[:, t + 1] = -hit + (1 - hit) * got_coin

        # cell codes: 0 empty, 1 wall, 2 agent, 3 troll, 4 coin
        states = states + 2 * agent + 3 * troll + 4 * coins * (1 - troll)

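        # shapes: states is (nb, T, height, width), agent_actions and
        # rewards are (nb, T)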
        return states, agent_actions, rewards

    ######################################################################

    def episodes2seq(self, states, actions, rewards):
        # the lookahead reward at time t summarizes the rewards from t on:
        # the worst future reward if any is negative, otherwise the best
        neg = rewards.new_zeros(rewards.size())
        pos = rewards.new_zeros(rewards.size())
        for t in range(neg.size(1)):
            neg[:, t] = rewards[:, t:].min(dim=-1).values
            pos[:, t] = rewards[:, t:].max(dim=-1).values
        s = (neg < 0).long() * neg + (neg >= 0).long() * pos

        return torch.cat(
            [
                self.lookahead_reward2code(s[:, :, None]),
                self.state2code(states.flatten(2)),
                self.reward2code(rewards[:, :, None]),
                self.action2code(actions[:, :, None]),
            ],
            dim=2,
        ).flatten(1)
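
    # The result has shape (nb, T * it_len); seq2episodes below inverts
    # this encoding.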

    def seq2episodes(self, seq):
        seq = seq.reshape(seq.size(0), -1, self.height * self.width + 3)
        lookahead_rewards = self.code2lookahead_reward(
            seq[:, :, self.index_lookahead_reward]
        )
        states = self.code2state(
            seq[:, :, self.index_states : self.height * self.width + self.index_states]
        )
        states = states.reshape(states.size(0), states.size(1), self.height, self.width)
        actions = self.code2action(seq[:, :, self.index_action])
        rewards = self.code2reward(seq[:, :, self.index_reward])
        return lookahead_rewards, states, actions, rewards

    def seq2str(self, seq):
        def token2str(t):
            if (
                t >= self.first_states_code
                and t < self.first_states_code + self.nb_states_codes
            ):
                # empty, wall, agent, troll, coin
                return "_#@T$"[t - self.first_states_code]
            elif (
                t >= self.first_actions_code
                and t < self.first_actions_code + self.nb_actions_codes
            ):
                # idle, south, north, east, west
                return "ISNEW"[t - self.first_actions_code]
            elif (
                t >= self.first_rewards_code
                and t < self.first_rewards_code + self.nb_rewards_codes
            ):
                return "-0+"[t - self.first_rewards_code]
            elif (
                t >= self.first_lookahead_rewards_code
                and t
                < self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
            ):
                # negative, none, positive, UNKNOWN
                return "n.pU"[t - self.first_lookahead_rewards_code]
            else:
                return "?"

        return ["".join([token2str(x.item()) for x in row]) for row in seq]

    ######################################################################

    def episodes2str(
        self,
        lookahead_rewards,
        states,
        actions,
        rewards,
        unicode=False,
        ansi_colors=False,
    ):
        if unicode:
            symbols = "·█@T$"
            # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
            vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
        else:
            symbols = " #@T$"
            vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

        hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

        result = hline

        for n in range(states.size(0)):

            def state_symbol(v):
                v = v.item()
                return "?" if v < 0 or v >= len(symbols) else symbols[v]

            # one text row per grid row, with all the time steps side by side
            for i in range(states.size(2)):
                result += (
                    vert
                    + vert.join(
                        [
                            "".join([state_symbol(v) for v in row])
                            for row in states[n, :, i]
                        ]
                    )
                    + vert
                    + "\n"
                )

            # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

            def status_bar(a, r, lr=None):
                # "action/reward", with the lookahead reward right-aligned
                a, r = a.item(), r.item()
                sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
                sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
                if lr is None:
                    sb_lr = ""
                else:
                    lr = lr.item()
                    sb_lr = "n pU"[lr + 1] if lr in {-1, 0, 1, 2} else "?"
                return (
                    sb_a
                    + "/"
                    + sb_r
                    + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
                    + sb_lr
                )

            result += (
                vert
                + vert.join(
                    [
                        status_bar(a, r, lr)
                        for a, r, lr in zip(
                            actions[n], rewards[n], lookahead_rewards[n]
                        )
                    ]
                )
                + vert
                + "\n"
            )

            result += hline

        if ansi_colors:
            # troll in red, agent in green, coins in blue
            for u, c in [("T", 31), ("@", 32), ("$", 34)]:
                result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")

        return result

    ######################################################################

    def save_seq_as_anim_script(self, seq, filename):
        it_len = self.height * self.width + 3

        # reorganize the sequences time step first: (T, nb, it_len)
        seq = (
            seq.reshape(seq.size(0), -1, it_len)
            .permute(1, 0, 2)
            .reshape(self.T, seq.size(0), -1)
        )

        with open(filename, "w") as f:
            for t in range(self.T):
                # f.write("clear\n")
                f.write("cat << EOF\n")
                f.write("\u001b[H")  # move the cursor to the top-left corner
                # for i in range(seq.size(2)):
                # lr, s, a, r = seq2episodes(seq[t : t + 1, :, i], self.height, self.width)
                # group the episodes into 8 rows for display
                lr, s, a, r = self.seq2episodes(seq[t : t + 1, :].reshape(8, -1))
                f.write(self.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
                f.write("EOF\n")
                f.write("sleep 0.25\n")
            print(f"Saved {filename}")


if __name__ == "__main__":
    gw = GreedWorld(height=5, width=7, T=10, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=6)
    seq = gw.episodes2seq(states, actions, rewards)
    lr, s, a, r = gw.seq2episodes(seq)
    print(gw.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
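
    # sanity check: decoding must recover the generated episodes exactly
    assert (s == states).all() and (a == actions).all() and (r == rewards).all()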

    print()
    for line in gw.seq2str(seq):
        print(line)

    gw = GreedWorld(height=5, width=7, T=100, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=128)
    seq = gw.episodes2seq(states, actions, rewards)
    gw.save_seq_as_anim_script(seq, "anim.sh")
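
    # the generated script animates the episodes in a terminal, e.g.:
    #   bash anim.sh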