#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import torch

from torch.nn import functional as F

######################################################################

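# A simple grid world: an agent (@) collects coins ($) for a +1 reward
# while a randomly moving troll (T) inflicts a -1 reward whenever it
# ends up adjacent to the agent, all inside an arena delimited by
# walls (#). Episodes are encoded as flat token sequences suitable for
# training a sequence model.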
class GreedWorld:
    def __init__(self, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
        self.height = height
        self.width = width
        self.T = T
        self.nb_walls = nb_walls
        self.nb_coins = nb_coins

        self.nb_states_codes = 5
        self.nb_actions_codes = 5
        self.nb_rewards_codes = 3
        self.nb_lookahead_rewards_codes = 4  # stands for -1, 0, +1, and UNKNOWN

        self.first_states_code = 0
        self.first_actions_code = self.first_states_code + self.nb_states_codes
        self.first_rewards_code = self.first_actions_code + self.nb_actions_codes
        self.first_lookahead_rewards_code = (
            self.first_rewards_code + self.nb_rewards_codes
        )
        self.nb_codes = (
            self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
        )

        self.state_len = self.height * self.width
        self.index_states = 0
        self.index_reward = self.state_len
        self.index_lookahead_reward = self.state_len + 1
        self.index_action = self.state_len + 2
        self.it_len = self.state_len + 3  # state / reward / lookahead_reward / action

    def state2code(self, r):
        return r + self.first_states_code

    def code2state(self, r):
        return r - self.first_states_code

    def action2code(self, r):
        return r + self.first_actions_code

    def code2action(self, r):
        return r - self.first_actions_code

    def reward2code(self, r):
        return r + 1 + self.first_rewards_code

    def code2reward(self, r):
        return r - self.first_rewards_code - 1

    def lookahead_reward2code(self, r):
        # -1, 0, +1 or 2 for UNKNOWN
        return r + 1 + self.first_lookahead_rewards_code

    def code2lookahead_reward(self, r):
        return r - self.first_lookahead_rewards_code - 1

    ######################################################################

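    # Generates nb episodes of T frames each. Walls are sampled as the
    # argmax cells of a random field (borders excluded), then coins the
    # same way on the remaining free cells; the agent starts at (0, 0)
    # and the troll at the opposite corner. Both take uniformly random
    # actions, and a move that would land on a wall or on the other
    # character is cancelled. The reward is -1 when the troll ends up
    # adjacent to the agent, otherwise +1 when a coin is picked up.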
    def generate_episodes(self, nb):
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, :] = 0
        rnd[:, -1, :] = 0
        rnd[:, :, 0] = 0
        rnd[:, :, -1] = 0
        wall = 0
        for k in range(self.nb_walls):
            wall = wall + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - wall.clamp(max=1))

        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, 0] = 0  # Do not put a coin at the agent's starting position
        coins = torch.zeros(nb, self.T, self.height, self.width, dtype=torch.int64)
        rnd = rnd * (1 - wall.clamp(max=1))
        for k in range(self.nb_coins):
            coins[:, 0] = coins[:, 0] + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - coins[:, 0].clamp(max=1))

        states = wall[:, None, :, :].expand(-1, self.T, -1, -1).clone()

        agent = torch.zeros(states.size(), dtype=torch.int64)
        agent[:, 0, 0, 0] = 1
        agent_actions = torch.randint(5, (nb, self.T))
        rewards = torch.zeros(nb, self.T, dtype=torch.int64)

        troll = torch.zeros(states.size(), dtype=torch.int64)
        troll[:, 0, -1, -1] = 1
        troll_actions = torch.randint(5, (nb, self.T))

        all_moves = agent.new(nb, 5, self.height, self.width)
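        # The five channels of all_moves hold the occupancy map shifted
        # according to the actions "ISNEW": idle, south, north, east,
        # and west.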
        for t in range(self.T - 1):
            all_moves.zero_()
            all_moves[:, 0] = agent[:, t]
            all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
            a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            collision = (
                (after_move * (1 - wall) * (1 - troll[:, t]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

            all_moves.zero_()
            all_moves[:, 0] = troll[:, t]
            all_moves[:, 1, 1:, :] = troll[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = troll[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = troll[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = troll[:, t, :, 1:]
            a = F.one_hot(troll_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            collision = (
                (after_move * (1 - wall) * (1 - agent[:, t + 1]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            troll[:, t + 1] = collision * troll[:, t] + (1 - collision) * after_move

            hit = (
                (agent[:, t + 1, 1:, :] * troll[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
                + (agent[:, t + 1, :-1, :] * troll[:, t + 1, 1:, :])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, 1:] * troll[:, t + 1, :, :-1])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, :-1] * troll[:, t + 1, :, 1:])
                .flatten(1)
                .sum(dim=1)
            )
            hit = (hit > 0).long()

            # assert hit.min() == 0 and hit.max() <= 1

            got_coin = (agent[:, t + 1] * coins[:, t]).flatten(1).sum(dim=1)
            coins[:, t + 1] = coins[:, t] * (1 - agent[:, t + 1])

            rewards[:, t + 1] = -hit + (1 - hit) * got_coin

        states = states + 2 * agent + 3 * troll + 4 * coins * (1 - troll)

        return states, agent_actions, rewards

    ######################################################################

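    # Flattens episodes into token sequences of it_len tokens per time
    # step. The lookahead reward at time t summarizes the future: -1 if
    # any reward from t onward is negative, otherwise the maximum
    # reward from t onward.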
    def episodes2seq(self, states, actions, rewards):
        neg = rewards.new_zeros(rewards.size())
        pos = rewards.new_zeros(rewards.size())
        for t in range(neg.size(1)):
            neg[:, t] = rewards[:, t:].min(dim=-1).values
            pos[:, t] = rewards[:, t:].max(dim=-1).values
        s = (neg < 0).long() * neg + (neg >= 0).long() * pos

        return torch.cat(
            [
                self.state2code(states.flatten(2)),
                self.reward2code(rewards[:, :, None]),
                self.lookahead_reward2code(s[:, :, None]),
                self.action2code(actions[:, :, None]),
            ],
            dim=2,
        ).flatten(1)

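    # Inverse of episodes2seq: decodes a flat token sequence back into
    # (lookahead_rewards, states, actions, rewards) tensors.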
    def seq2episodes(self, seq):
        seq = seq.reshape(seq.size(0), -1, self.height * self.width + 3)
        lookahead_rewards = self.code2lookahead_reward(
            seq[:, :, self.index_lookahead_reward]
        )
        states = self.code2state(
            seq[:, :, self.index_states : self.height * self.width + self.index_states]
        )
        states = states.reshape(states.size(0), states.size(1), self.height, self.width)
        actions = self.code2action(seq[:, :, self.index_action])
        rewards = self.code2reward(seq[:, :, self.index_reward])
        return lookahead_rewards, states, actions, rewards

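    # Renders each sequence as a string with one character per token:
    # "_#@T$" for states, "ISNEW" for actions, "-0+" for rewards, and
    # "n.pU" for lookahead rewards, with "?" for out-of-range codes.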
    def seq2str(self, seq):
        def token2str(t):
            if (
                t >= self.first_states_code
                and t < self.first_states_code + self.nb_states_codes
            ):
                return "_#@T$"[t - self.first_states_code]
            elif (
                t >= self.first_actions_code
                and t < self.first_actions_code + self.nb_actions_codes
            ):
                return "ISNEW"[t - self.first_actions_code]
            elif (
                t >= self.first_rewards_code
                and t < self.first_rewards_code + self.nb_rewards_codes
            ):
                return "-0+"[t - self.first_rewards_code]
            elif (
                t >= self.first_lookahead_rewards_code
                and t
                < self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
            ):
                return "n.pU"[t - self.first_lookahead_rewards_code]
            else:
                return "?"

        return ["".join([token2str(x.item()) for x in row]) for row in seq]

    ######################################################################

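    # Pretty-prints episodes as rows of frames, one row per episode,
    # with a status bar (action / reward, and the lookahead reward
    # right-aligned) under each frame. Optionally uses Unicode
    # box-drawing characters and ANSI colors (troll red, agent green,
    # coins blue).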
    def episodes2str(
        self,
        lookahead_rewards,
        states,
        actions,
        rewards,
        unicode=False,
        ansi_colors=False,
    ):
        if unicode:
            symbols = "·█@T$"
            # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
            vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
        else:
            symbols = " #@T$"
            vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

        hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

        result = hline

        for n in range(states.size(0)):

            def state_symbol(v):
                v = v.item()
                return "?" if v < 0 or v >= len(symbols) else symbols[v]

            for i in range(states.size(2)):
                result += (
                    vert
                    + vert.join(
                        [
                            "".join([state_symbol(v) for v in row])
                            for row in states[n, :, i]
                        ]
                    )
                    + vert
                    + "\n"
                )

            # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

            def status_bar(a, r, lr=None):
                a, r = a.item(), r.item()
                sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
                sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
                if lr is None:
                    sb_lr = ""
                else:
                    lr = lr.item()
                    sb_lr = "n pU"[lr + 1] if lr in {-1, 0, 1, 2} else "?"
                return (
                    sb_a
                    + "/"
                    + sb_r
                    + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
                    + sb_lr
                )

            result += (
                vert
                + vert.join(
                    [
                        status_bar(a, r, lr)
                        for a, r, lr in zip(
                            actions[n], rewards[n], lookahead_rewards[n]
                        )
                    ]
                )
                + vert
                + "\n"
            )

            result += hline

        if ansi_colors:
            for u, c in [("T", 31), ("@", 32), ("$", 34)]:
                result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")

        return result

    ######################################################################

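    # Writes a shell script that replays the episodes as an in-place
    # terminal animation, one frame per time step. The reshape(8, -1)
    # below lays the batch out as eight rows of frames, which assumes
    # the number of episodes is a multiple of 8 (128 in the demo
    # below).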
    def save_seq_as_anim_script(self, seq, filename):
        it_len = self.height * self.width + 3

        seq = (
            seq.reshape(seq.size(0), -1, it_len)
            .permute(1, 0, 2)
            .reshape(self.T, seq.size(0), -1)
        )

        with open(filename, "w") as f:
            for t in range(self.T):
                # f.write("clear\n")
                f.write("cat << EOF\n")
                f.write("\u001b[H")
                # for i in range(seq.size(2)):
                # lr, s, a, r = seq2episodes(seq[t : t + 1, :, i], self.height, self.width)
                lr, s, a, r = self.seq2episodes(seq[t : t + 1, :].reshape(8, -1))
                f.write(self.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
                f.write("EOF\n")
                f.write("sleep 0.25\n")
            print(f"Saved {filename}")


if __name__ == "__main__":
    gw = GreedWorld(height=5, width=7, T=10, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=6)
    seq = gw.episodes2seq(states, actions, rewards)
    lr, s, a, r = gw.seq2episodes(seq)
    print(gw.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))

    print()
    for s in gw.seq2str(seq):
        print(s)

    gw = GreedWorld(height=5, width=7, T=100, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=128)
    seq = gw.episodes2seq(states, actions, rewards)
    gw.save_seq_as_anim_script(seq, "anim.sh")
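
    # Sanity check: re-encoding the decoded episodes should reproduce
    # the token sequence exactly, since the lookahead rewards are
    # recomputed from the rewards.
    _, s2, a2, r2 = gw.seq2episodes(seq)
    assert (gw.episodes2seq(s2, a2, r2) == seq).all()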