#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import torch

from torch.nn import functional as F

######################################################################


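# The Greed world: an agent ("@") and a troll ("T") move on a
# height x width grid with walls ("#") and coins ("$"). Picking up a
# coin yields reward +1, ending up next to the troll yields reward -1.
# Episodes are encoded as flat token sequences for sequence models.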
class GreedWorld:
    def __init__(self, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
        self.height = height
        self.width = width
        self.T = T
        self.nb_walls = nb_walls
        self.nb_coins = nb_coins

        # The token vocabulary is made of disjoint ranges of codes, one
        # per type of quantity
        self.nb_states_codes = 5
        self.nb_actions_codes = 5
        self.nb_rewards_codes = 3
        self.nb_lookahead_rewards_codes = 4  # stands for -1, 0, +1, and UNKNOWN

        self.first_states_code = 0
        self.first_actions_code = self.first_states_code + self.nb_states_codes
        self.first_rewards_code = self.first_actions_code + self.nb_actions_codes
        self.first_lookahead_rewards_code = (
            self.first_rewards_code + self.nb_rewards_codes
        )
        self.nb_codes = (
            self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
        )

        # Positions of the fields in one iteration of the sequence
        self.state_len = self.height * self.width
        self.index_states = 0
        self.index_reward = self.state_len
        self.index_lookahead_reward = self.state_len + 1
        self.index_action = self.state_len + 2
        self.it_len = self.state_len + 3  # state / reward / lookahead_reward / action

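    # Conversions between values and token codes. Rewards and lookahead
    # rewards are shifted by +1 so that -1 / 0 / +1 (and 2 for UNKNOWN)
    # map to consecutive non-negative codes.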
    def state2code(self, r):
        return r + self.first_states_code

    def code2state(self, r):
        return r - self.first_states_code

    def action2code(self, r):
        return r + self.first_actions_code

    def code2action(self, r):
        return r - self.first_actions_code

    def reward2code(self, r):
        return r + 1 + self.first_rewards_code

    def code2reward(self, r):
        return r - self.first_rewards_code - 1

    def lookahead_reward2code(self, r):
        # -1, 0, +1 or 2 for UNKNOWN
        return r + 1 + self.first_lookahead_rewards_code

    def code2lookahead_reward(self, r):
        return r - self.first_lookahead_rewards_code - 1

    ######################################################################

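    # Sample nb episodes with uniformly random agent and troll actions.
    # Returns states (nb, T, height, width), actions (nb, T), and
    # rewards (nb, T), all of dtype torch.int64.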
    def generate_episodes(self, nb):
        # Place nb_walls wall cells in the interior, one at a time, at
        # the argmax of a random field that is zeroed at occupied cells
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, :] = 0
        rnd[:, -1, :] = 0
        rnd[:, :, 0] = 0
        rnd[:, :, -1] = 0
        wall = 0
        for k in range(self.nb_walls):
            wall = wall + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - wall.clamp(max=1))

        # Same procedure for the coins, avoiding the walls
        rnd = torch.rand(nb, self.height, self.width)
        rnd[:, 0, 0] = 0  # do not put a coin at the agent's starting position
        coins = torch.zeros(nb, self.T, self.height, self.width, dtype=torch.int64)
        rnd = rnd * (1 - wall.clamp(max=1))
        for k in range(self.nb_coins):
            coins[:, 0] = coins[:, 0] + (
                rnd.flatten(1).argmax(dim=1)[:, None]
                == torch.arange(rnd.flatten(1).size(1))[None, :]
            ).long().reshape(rnd.size())

            rnd = rnd * (1 - coins[:, 0].clamp(max=1))

        states = wall[:, None, :, :].expand(-1, self.T, -1, -1).clone()

        # The agent starts in the top-left corner, the troll in the
        # bottom-right one, both with uniformly random actions
        agent = torch.zeros(states.size(), dtype=torch.int64)
        agent[:, 0, 0, 0] = 1
        agent_actions = torch.randint(5, (nb, self.T))
        rewards = torch.zeros(nb, self.T, dtype=torch.int64)

        troll = torch.zeros(states.size(), dtype=torch.int64)
        troll[:, 0, -1, -1] = 1
        troll_actions = torch.randint(5, (nb, self.T))

        # all_moves[:, a] is the one-hot position after action a
        # (0 idle, 1 south, 2 north, 3 east, 4 west)
        all_moves = agent.new(nb, 5, self.height, self.width)
        for t in range(self.T - 1):
            all_moves.zero_()
            all_moves[:, 0] = agent[:, t]
            all_moves[:, 1, 1:, :] = agent[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = agent[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = agent[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = agent[:, t, :, 1:]
            a = F.one_hot(agent_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            # A move into a wall, into the troll, or off the grid
            # leaves the agent where it was
            collision = (
                (after_move * (1 - wall) * (1 - troll[:, t]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            agent[:, t + 1] = collision * agent[:, t] + (1 - collision) * after_move

            all_moves.zero_()
            all_moves[:, 0] = troll[:, t]
            all_moves[:, 1, 1:, :] = troll[:, t, :-1, :]
            all_moves[:, 2, :-1, :] = troll[:, t, 1:, :]
            all_moves[:, 3, :, 1:] = troll[:, t, :, :-1]
            all_moves[:, 4, :, :-1] = troll[:, t, :, 1:]
            a = F.one_hot(troll_actions[:, t], num_classes=5)[:, :, None, None]
            after_move = (all_moves * a).sum(dim=1)
            collision = (
                (after_move * (1 - wall) * (1 - agent[:, t + 1]))
                .flatten(1)
                .sum(dim=1)[:, None, None]
                == 0
            ).long()
            troll[:, t + 1] = collision * troll[:, t] + (1 - collision) * after_move

            # The agent is hit when the troll ends up in one of the
            # four cells adjacent to it
            hit = (
                (agent[:, t + 1, 1:, :] * troll[:, t + 1, :-1, :]).flatten(1).sum(dim=1)
                + (agent[:, t + 1, :-1, :] * troll[:, t + 1, 1:, :])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, 1:] * troll[:, t + 1, :, :-1])
                .flatten(1)
                .sum(dim=1)
                + (agent[:, t + 1, :, :-1] * troll[:, t + 1, :, 1:])
                .flatten(1)
                .sum(dim=1)
            )
            hit = (hit > 0).long()

            # assert hit.min() == 0 and hit.max() <= 1

            got_coin = (agent[:, t + 1] * coins[:, t]).flatten(1).sum(dim=1)
            coins[:, t + 1] = coins[:, t] * (1 - agent[:, t + 1])

            # Reward is -1 when hit, otherwise +1 per coin picked up
            rewards[:, t + 1] = -hit + (1 - hit) * got_coin

        # Cell codes: 0 empty, 1 wall, 2 agent, 3 troll, 4 coin
        states = states + 2 * agent + 3 * troll + 4 * coins * (1 - troll)

        return states, agent_actions, rewards

    ######################################################################

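    # Encode a batch of episodes into token sequences. The lookahead
    # reward at time t summarizes the rewards from t onward: the most
    # negative one if any future reward is negative, otherwise the most
    # positive one.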
    def episodes2seq(self, states, actions, rewards):
        neg = rewards.new_zeros(rewards.size())
        pos = rewards.new_zeros(rewards.size())
        for t in range(neg.size(1) - 1):
            neg[:, t] = rewards[:, t:].min(dim=-1).values
            pos[:, t] = rewards[:, t:].max(dim=-1).values
        # Prefer the negative extreme when there is one (the last time
        # step keeps the value 0)
        s = (neg < 0).long() * neg + (neg >= 0).long() * pos

        return torch.cat(
            [
                self.state2code(states.flatten(2)),
                self.reward2code(rewards[:, :, None]),
                self.lookahead_reward2code(s[:, :, None]),
                self.action2code(actions[:, :, None]),
            ],
            dim=2,
        ).flatten(1)

    def seq2episodes(self, seq):
        # Field order per iteration: state / reward / lookahead_reward /
        # action, cf. the index_* attributes set in __init__ and the
        # concatenation in episodes2seq
        seq = seq.reshape(seq.size(0), -1, self.height * self.width + 3)
        states = self.code2state(seq[:, :, : self.height * self.width])
        states = states.reshape(states.size(0), states.size(1), self.height, self.width)
        rewards = self.code2reward(seq[:, :, self.height * self.width])
        lookahead_rewards = self.code2lookahead_reward(
            seq[:, :, self.height * self.width + 1]
        )
        actions = self.code2action(seq[:, :, self.height * self.width + 2])
        return lookahead_rewards, states, actions, rewards

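    # Render token sequences as strings, one character per token:
    # "_#@T$" for states, "ISNEW" for actions (idle / south / north /
    # east / west), "-0+" for rewards, "n.pU" for lookahead rewards,
    # and "?" for any out-of-range code.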
    def seq2str(self, seq):
        def token2str(t):
            if (
                t >= self.first_states_code
                and t < self.first_states_code + self.nb_states_codes
            ):
                return "_#@T$"[t - self.first_states_code]
            elif (
                t >= self.first_actions_code
                and t < self.first_actions_code + self.nb_actions_codes
            ):
                return "ISNEW"[t - self.first_actions_code]
            elif (
                t >= self.first_rewards_code
                and t < self.first_rewards_code + self.nb_rewards_codes
            ):
                return "-0+"[t - self.first_rewards_code]
            elif (
                t >= self.first_lookahead_rewards_code
                and t
                < self.first_lookahead_rewards_code + self.nb_lookahead_rewards_codes
            ):
                return "n.pU"[t - self.first_lookahead_rewards_code]
            else:
                return "?"

        return ["".join([token2str(x.item()) for x in row]) for row in seq]

    ######################################################################

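    # Render a batch of episodes as a text strip, one row of frames per
    # episode, with a status bar "action/reward" (and the lookahead
    # reward, right-aligned) under each frame.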
    def episodes2str(
        self,
        lookahead_rewards,
        states,
        actions,
        rewards,
        unicode=False,
        ansi_colors=False,
    ):
        if unicode:
            symbols = "·█@T$"
            # vert, hori, cross, thin_hori = "║", "═", "╬", "─"
            vert, hori, cross, thin_vert, thin_hori = "┃", "━", "╋", "│", "─"
        else:
            symbols = " #@T$"
            vert, hori, cross, thin_vert, thin_hori = "|", "-", "+", "|", "-"

        hline = (cross + hori * states.size(-1)) * states.size(1) + cross + "\n"

        result = hline

        for n in range(states.size(0)):

            def state_symbol(v):
                v = v.item()
                return "?" if v < 0 or v >= len(symbols) else symbols[v]

            for i in range(states.size(2)):
                result += (
                    vert
                    + vert.join(
                        [
                            "".join([state_symbol(v) for v in row])
                            for row in states[n, :, i]
                        ]
                    )
                    + vert
                    + "\n"
                )

            # result += (vert + thin_hori * states.size(-1)) * states.size(1) + vert + "\n"

            def status_bar(a, r, lr=None):
                a, r = a.item(), r.item()
                sb_a = "ISNEW"[a] if a >= 0 and a < 5 else "?"
                sb_r = "- +"[r + 1] if r in {-1, 0, 1} else "?"
                if lr is None:
                    sb_lr = ""
                else:
                    lr = lr.item()
                    sb_lr = "n pU"[lr + 1] if lr in {-1, 0, 1, 2} else "?"
                return (
                    sb_a
                    + "/"
                    + sb_r
                    + " " * (states.size(-1) - 1 - len(sb_a + sb_r + sb_lr))
                    + sb_lr
                )

            result += (
                vert
                + vert.join(
                    [
                        status_bar(a, r, lr)
                        for a, r, lr in zip(
                            actions[n], rewards[n], lookahead_rewards[n]
                        )
                    ]
                )
                + vert
                + "\n"
            )

            result += hline

        if ansi_colors:
            for u, c in [("T", 31), ("@", 32), ("$", 34)]:
                result = result.replace(u, f"\u001b[{c}m{u}\u001b[0m")

        return result

    ######################################################################

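    # Write a shell script that replays the episodes in a terminal, one
    # frame per time step. The reshape(8, -1) below hard-codes a layout
    # of 8 rows of episodes per frame, so the number of episodes must
    # be a multiple of 8.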
    def save_seq_as_anim_script(self, seq, filename):
        it_len = self.height * self.width + 3

        # Reorganize the sequences as (T, nb, it_len) so that one time
        # step of all the episodes can be rendered as a single frame
        seq = (
            seq.reshape(seq.size(0), -1, it_len)
            .permute(1, 0, 2)
            .reshape(self.T, seq.size(0), -1)
        )

        with open(filename, "w") as f:
            for t in range(self.T):
                # f.write("clear\n")
                f.write("cat << EOF\n")
                f.write("\u001b[H")  # move the cursor to the top-left corner
                # for i in range(seq.size(2)):
                # lr, s, a, r = seq2episodes(seq[t : t + 1, :, i], self.height, self.width)
                lr, s, a, r = self.seq2episodes(seq[t : t + 1, :].reshape(8, -1))
                f.write(self.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
                f.write("EOF\n")
                f.write("sleep 0.25\n")
            print(f"Saved {filename}")


if __name__ == "__main__":
    gw = GreedWorld(height=5, width=7, T=10, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=6)
    seq = gw.episodes2seq(states, actions, rewards)
    lr, s, a, r = gw.seq2episodes(seq)
    print(gw.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
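
    # The encoding / decoding round trip should be lossless
    assert (s == states).all() and (a == actions).all() and (r == rewards).all()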

    print()
    for s in gw.seq2str(seq):
        print(s)

    gw = GreedWorld(height=5, width=7, T=100, nb_walls=4, nb_coins=2)
    states, actions, rewards = gw.generate_episodes(nb=128)
    seq = gw.episodes2seq(states, actions, rewards)
    gw.save_seq_as_anim_script(seq, "anim.sh")