X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=324376df60319e9549ae431c5d43dd04f1a29ed9;hb=232299b8af7e66a02e64bb2e47b525e2f50b099d;hp=f2b7709f1dc742979bad1e9a44e27da4525904d3;hpb=1eeba5d817d6e440a93895d42f6e580e9ba273fd;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index f2b7709..324376d 100755
--- a/tasks.py
+++ b/tasks.py
@@ -5,7 +5,7 @@
 
 # Written by Francois Fleuret <francois@fleuret.org>
 
-import math, os, tqdm
+import math, os, tqdm, warnings
 
 import torch, torchvision
 
@@ -1867,10 +1867,10 @@ class QMLP(Task):
 
 ######################################################################
 
-import escape
+import greed
 
 
-class Escape(Task):
+class Greed(Task):
     def __init__(
         self,
         nb_train_samples,
@@ -1880,6 +1880,7 @@
         width,
         T,
         nb_walls,
+        nb_coins,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1887,17 +1888,28 @@
         self.batch_size = batch_size
         self.device = device
 
-        self.height = height
-        self.width = width
-        states, actions, rewards = escape.generate_episodes(
-            nb_train_samples + nb_test_samples, height, width, T, nb_walls
+        self.world = greed.GreedWorld(height, width, T, nb_walls, nb_coins)
+
+        states, actions, rewards = self.world.generate_episodes(
+            nb_train_samples + nb_test_samples
         )
-        seq = escape.episodes2seq(states, actions, rewards)
-        # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+        seq = self.world.episodes2seq(states, actions, rewards)
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
 
+    def wipe_lookahead_rewards(self, batch):
+        t = torch.arange(batch.size(1), device=batch.device)[None, :]
+        u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+        lr_mask = (t <= u).long() * (
+            t % self.world.it_len == self.world.index_lookahead_reward
+        ).long()
+
+        return (
+            lr_mask * self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
+            + (1 - lr_mask) * batch
+        )
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
@@ -1908,25 +1920,15 @@
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
-            yield batch
+            yield self.wipe_lookahead_rewards(batch)
 
     def vocabulary_size(self):
-        return escape.nb_codes
+        return self.world.nb_codes
 
     def thinking_autoregression(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
-        t = torch.arange(result.size(1), device=result.device)[None, :]
-
-        state_len = self.height * self.width
-        index_lookahead_reward = 0
-        index_states = 1
-        index_action = state_len + 1
-        index_reward = state_len + 2
-        it_len = state_len + 3  # lookahead_reward / state / action / reward
-
-        result[:, it_len:] = -1
+        snapshots = []
 
         def ar(result, ar_mask, logit_biases=None):
             ar_mask = ar_mask.expand_as(result)
@@ -1941,46 +1943,59 @@
                 device=self.device,
                 progress_bar_desc=None,
             )
+            warnings.warn("keeping thinking snapshots", RuntimeWarning)
+            snapshots.append(result[:10].detach().clone())
 
         # Generate iteration after iteration
 
-        optimistic_bias = result.new_zeros(escape.nb_codes, device=result.device)
-        optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1)
-        optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
+        result = self.test_input[:250].clone()
+        # Erase all the content but that of the first iteration
+        result[:, self.world.it_len :] = -1
+        # Set the lookahead_reward of the first iteration to UNKNOWN
+        result[:, self.world.index_lookahead_reward] = self.world.lookahead_reward2code(
+            greed.REWARD_UNKNOWN
+        )
 
-        snapshots = []
+        t = torch.arange(result.size(1), device=result.device)[None, :]
 
         for u in tqdm.tqdm(
-            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+            range(0, result.size(1), self.world.it_len),
+            desc="thinking",
         ):
-            # Generate the lookahead_reward and state
-            ar_mask = (t >= u + index_lookahead_reward).long() * (
-                t < u + index_states + state_len
+            # Generate the next state but keep the initial one; the
+            # lookahead_reward of previous iterations is set to
+            # UNKNOWN
+            if u > 0:
+                result[
+                    :, u + self.world.index_lookahead_reward
+                ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
+            ar_mask = (t >= u + self.world.index_states).long() * (
+                t < u + self.world.index_states + self.world.state_len
+            ).long()
+            ar(result, ar_mask)
+
+            # Generate the action and reward with lookahead_reward set to +1
+            result[
+                :, u + self.world.index_lookahead_reward
+            ] = self.world.lookahead_reward2code(greed.REWARD_PLUS)
+            ar_mask = (t >= u + self.world.index_reward).long() * (
+                t <= u + self.world.index_action
             ).long()
             ar(result, ar_mask)
-            snapshots.append(result[:10].detach().clone())
 
-            backup_lookahead_reward = result[:, u + index_lookahead_reward]
-
-            # Re-generate the lookahead_reward
-            ar_mask = (t == u + index_lookahead_reward).long()
-            ar(result, ar_mask, logit_biases=optimistic_bias)
-            snapshots.append(result[:10].detach().clone())
-
-            # Generate the action and reward
-            ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
-            ar(result, ar_mask)
-            snapshots.append(result[:10].detach().clone())
 
-            result[:, u + index_lookahead_reward] = backup_lookahead_reward
+            # Set the lookahead_reward to UNKNOWN for the next iterations
+            result[
+                :, u + self.world.index_lookahead_reward
+            ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
 
         filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
             for n in range(10):
                 for s in snapshots:
-                    lr, s, a, r = escape.seq2episodes(
-                        s[n : n + 1], self.height, self.width
+                    lr, s, a, r = self.world.seq2episodes(
+                        s[n : n + 1],
                     )
-                    str = escape.episodes2str(
+                    str = self.world.episodes2str(
                         lr, s, a, r, unicode=True, ansi_colors=True
                     )
                     f.write(str)
@@ -1988,8 +2003,8 @@ class Escape(Task):
 
         # Saving the generated sequences
 
-        lr, s, a, r = escape.seq2episodes(result, self.height, self.width)
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        lr, s, a, r = self.world.seq2episodes(result)
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -1999,16 +2014,14 @@
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
+        result = self.wipe_lookahead_rewards(self.test_input[:250].clone())
 
         # Saving the ground truth
 
-        lr, s, a, r = escape.seq2episodes(
+        lr, s, a, r = self.world.seq2episodes(
             result,
-            self.height,
-            self.width,
         )
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -2018,8 +2031,7 @@
         # Re-generating from the first frame
 
         ar_mask = (
-            torch.arange(result.size(1), device=result.device)
-            >= self.height * self.width + 3
+            torch.arange(result.size(1), device=result.device) >= self.world.it_len
         ).long()[None, :]
         ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
@@ -2035,12 +2047,10 @@
 
         # Saving the generated sequences
 
-        lr, s, a, r = escape.seq2episodes(
+        lr, s, a, r = self.world.seq2episodes(
             result,
-            self.height,
-            self.width,
         )
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
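
For context when reading the patch, the masking idea behind the new wipe_lookahead_rewards method can be reproduced standalone. The sketch below mimics the logic of the added lines with plain tensors; the constants it_len, index_lookahead_reward, and unknown_code are hypothetical stand-ins, since their real values come from greed.GreedWorld in greed.py, which is outside this diff.

    # Standalone sketch of the wipe_lookahead_rewards logic, with
    # hypothetical constants standing in for greed.GreedWorld attributes.
    import torch

    it_len = 5                   # assumed number of tokens per iteration
    index_lookahead_reward = 0   # assumed offset of the lookahead-reward slot
    unknown_code = 11            # stand-in for lookahead_reward2code(greed.REWARD_UNKNOWN)

    batch = torch.randint(10, (4, 3 * it_len))  # 4 sequences of 3 iterations each

    t = torch.arange(batch.size(1))[None, :]              # position of every token
    u = torch.randint(batch.size(1), (batch.size(0), 1))  # random cutoff per sequence

    # 1 exactly on the lookahead-reward slots located at or before the cutoff
    lr_mask = (t <= u).long() * (t % it_len == index_lookahead_reward).long()

    # overwrite those slots with the UNKNOWN code, keep everything else
    wiped = lr_mask * unknown_code + (1 - lr_mask) * batch

The same slot arithmetic drives the rewritten thinking_autoregression loop: for each window of it_len tokens, the patch first generates the state with the lookahead reward forced to UNKNOWN, then sets it to REWARD_PLUS and generates the action and reward, and finally resets the slot to UNKNOWN before moving to the next iteration.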