X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=443419eb340704273b64152edadb1286aae50cbf;hb=refs%2Fheads%2Fmaster;hp=78910a06d7a24977b462af0836a9219073381507;hpb=b35745d09b33aed20670ecb96726f89206487a24;p=picoclvr.git diff --git a/tasks.py b/tasks.py index 78910a0..443419e 100755 --- a/tasks.py +++ b/tasks.py @@ -5,7 +5,7 @@ # Written by Francois Fleuret -import math, os, tqdm +import math, os, tqdm, warnings import torch, torchvision @@ -27,6 +27,7 @@ def masked_inplace_autoregression( ar_mask, deterministic_synthesis, forbidden_tokens=None, + logit_biases=None, progress_bar_desc="autoregression", device=torch.device("cpu"), ): @@ -48,7 +49,11 @@ def masked_inplace_autoregression( for input, ar_mask in batches: model.masked_inplace_autoregression( - input, ar_mask, forbidden_tokens, deterministic_synthesis + input, + ar_mask, + deterministic_synthesis, + forbidden_tokens, + logit_biases, ) model.train(t) @@ -58,7 +63,7 @@ def masked_inplace_autoregression( class Task: - def batches(self, split="train"): + def batches(self, split="train", nb_to_use=-1, desc=None): pass def vocabulary_size(self): @@ -71,7 +76,7 @@ class Task: class TaskFromFile(Task): - def tensorize(self, pairs): + def tensorize(self, pairs, shuffle): len_max = max([len(x[0]) for x in pairs]) input = torch.cat( @@ -98,6 +103,11 @@ class TaskFromFile(Task): 0, ).to("cpu") + if shuffle: + i = torch.randperm(input.size(0)) + input = input[i].contiguous() + pred_mask = pred_mask[i].contiguous() + return input, pred_mask # trim all the tensors in the tuple z to remove as much token from @@ -117,32 +127,52 @@ class TaskFromFile(Task): def __init__( self, - filename, + train_filename, + test_filename, nb_train_samples, nb_test_samples, batch_size, + shuffle=False, device=torch.device("cpu"), ): self.batch_size = batch_size self.device = device - pairs = [] - with open(filename, "r") as f: - for _ in range(nb_train_samples + nb_test_samples): - sequence = f.readline().strip() - pred_mask = f.readline().strip() - assert len(sequence) == len(pred_mask) - assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}" - pairs.append((sequence, pred_mask)) - - symbols = ["#"] + list(set("".join([x[0] for x in pairs])) - set(["#"])) + def read_file(filename, nb=-1): + pairs = [] + with open(filename, "r") as f: + while True: + sequence = f.readline().strip() + if not sequence: + break + pred_mask = f.readline().strip() + assert len(sequence) == len(pred_mask) + assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}" + pairs.append((sequence, pred_mask)) + if len(pairs) == nb: + break + + if nb > 0: + pairs = pairs[:nb] + assert len(pairs) == nb + + return pairs + + train_pairs = read_file(train_filename, nb_train_samples) + test_pairs = read_file(test_filename, nb_test_samples) + + symbols = ["#"] + list( + set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"]) + ) self.char2id = dict([(c, n) for n, c in enumerate(symbols)]) self.id2char = dict([(n, c) for c, n in self.char2id.items()]) self.train_input, self.train_pred_masks = self.tensorize( - pairs[:nb_train_samples] + train_pairs, shuffle=shuffle + ) + self.test_input, self.test_pred_masks = self.tensorize( + test_pairs, shuffle=shuffle ) - self.test_input, self.test_pred_masks = self.tensorize(pairs[nb_train_samples:]) def batches(self, split="train", nb_to_use=-1, desc=None): assert split in {"train", "test"} @@ -173,7 +203,7 @@ class TaskFromFile(Task): 
logger(f"----------------------------------------------------------") - for e in self.tensor2str(result[:10]): + for e in self.tensor2str(result[:50]): logger(f"test_before {e}") masked_inplace_autoregression( @@ -187,7 +217,7 @@ class TaskFromFile(Task): logger(f"----------------------------------------------------------") - for e, c in zip(self.tensor2str(result[:10]), self.tensor2str(correct[:10])): + for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])): logger(f"test_after {e}") logger(f"correct {c}") @@ -459,7 +489,7 @@ class PicoCLVR(Task): self.train_input = self.tensorize(self.train_descr) self.test_input = self.tensorize(self.test_descr) - def batches(self, split="train"): + def batches(self, split="train", nb_to_use=-1, desc=None): assert split in {"train", "test"} input = self.train_input if split == "train" else self.test_input for batch in tqdm.tqdm( @@ -724,15 +754,17 @@ class Maze(Task): def compute_error( self, model, split="train", nb_to_use=-1, deterministic_synthesis=False ): + model_device = next(model.parameters()).device nb_total, nb_correct = 0, 0 count = torch.zeros( self.width * self.height, self.width * self.height, - device=self.device, + device=model_device, dtype=torch.int64, ) for input in self.batches(split, nb_to_use): + input = input.to(model_device) result = input.clone() ar_mask = result.new_zeros(result.size()) ar_mask[:, self.height * self.width :] = 1 @@ -806,7 +838,7 @@ class Maze(Task): eol = " " if j < count.size(1) - 1 else "\n" f.write(f"{count[i,j]}{eol}") - input = self.test_input[:48] + input = self.test_input[:48].to(next(model.parameters()).device) result = input.clone() ar_mask = result.new_zeros(result.size()) ar_mask[:, self.height * self.width :] = 1 @@ -1068,6 +1100,34 @@ class Stack(Task): device=self.device, ) + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + for label, input in [ + ("train", self.train_input[:32]), + ("test", self.test_input[:32]), + ]: + output = model(BracketedSequence(input)).x + output = output.log_softmax(dim=-1) + filename = os.path.join( + result_dir, f"stack_with_crossentropy_{n_epoch:04d}_{label}.txt" + ) + with open(filename, "w") as f: + for n in range(input.size(0)): + s = stack.seq_to_str( + input[n], nb_stacks=self.nb_stacks, nb_digits=self.nb_digits + ) + for t, k, w in zip(range(input[n].size(0)), input[n], s.split(" ")): + u = ( + " " * (10 - len(w)) + + w + + " " + + str(output[n][t][k].exp().item()) + + "\n" + ) + f.write(u) + f.write("\n") + logger(f"wrote {filename}") + #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ for n in range(result.size(0)): logger( f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}" @@ -1655,7 +1715,7 @@ class Grid(Task): self.t_nul = self.token2id["#"] self.t_true = self.token2id["true"] self.t_false = self.token2id["false"] - self.t_pipe = self.token2id["|"] + # self.t_pipe = self.token2id["|"] # Tokenize the train and test sets self.train_input = self.str2tensor(self.train_descr) @@ -1664,7 +1724,7 @@ class Grid(Task): None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr) ) - def batches(self, split="train"): + def batches(self, split="train", nb_to_use=-1, desc=None): assert split in {"train", "test"} input = self.train_input if split == "train" else self.test_input for batch in tqdm.tqdm( @@ -1793,7 +1853,7 @@ class QMLP(Task): self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1 - def batches(self, split="train"): + def batches(self, split="train", nb_to_use=-1, desc=None): assert split in {"train", "test"} input = self.train_input if split == "train" else self.test_input for batch in tqdm.tqdm( @@ -1836,3 +1896,200 @@ class QMLP(Task): ###################################################################### + +import greed + + +class Greed(Task): + def __init__( + self, + nb_train_samples, + nb_test_samples, + batch_size, + height, + width, + T, + nb_walls, + nb_coins, + logger=None, + device=torch.device("cpu"), + ): + super().__init__() + + self.batch_size = batch_size + self.device = device + + self.world = greed.GreedWorld(height, width, T, nb_walls, nb_coins) + + states, actions, rewards = self.world.generate_episodes( + nb_train_samples + nb_test_samples + ) + seq = self.world.episodes2seq(states, actions, rewards) + self.train_input = seq[:nb_train_samples].to(self.device) + self.test_input = seq[nb_train_samples:].to(self.device) + + def wipe_lookahead_rewards(self, batch): + t = torch.arange(batch.size(1), device=batch.device)[None, :] + u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device) + lr_mask = (t <= u).long() * ( + t % self.world.it_len == self.world.index_lookahead_reward + ).long() + + return ( + lr_mask * self.world.lookahead_reward2code(greed.REWARD_UNKNOWN) + + (1 - lr_mask) * batch + ) + + def batches(self, split="train", nb_to_use=-1, desc=None): + assert split in {"train", "test"} + input = self.train_input if split == "train" else self.test_input + if nb_to_use > 0: + input = input[:nb_to_use] + if desc is None: + desc = f"epoch-{split}" + for batch in tqdm.tqdm( + input.split(self.batch_size), dynamic_ncols=True, desc=desc + ): + yield self.wipe_lookahead_rewards(batch) + + def vocabulary_size(self): + return self.world.nb_codes + + def thinking_autoregression( + self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000 + ): + snapshots = [] + + def ar(result, ar_mask, logit_biases=None): + ar_mask = ar_mask.expand_as(result) + result *= 1 - ar_mask + masked_inplace_autoregression( + model, + self.batch_size, + result, + ar_mask, + deterministic_synthesis=deterministic_synthesis, + logit_biases=logit_biases, + device=self.device, + progress_bar_desc=None, + ) + warnings.warn("keeping thinking snapshots", RuntimeWarning) + snapshots.append(result[:100].detach().clone()) + + # Generate iteration after iteration + + result = self.test_input[:250].clone() + # Erase all the content but that of the first iteration + result[:, self.world.it_len :] = -1 + # Set the lookahead_reward of the firs to UNKNOWN + result[:, 
self.world.index_lookahead_reward] = self.world.lookahead_reward2code( + greed.REWARD_UNKNOWN + ) + + t = torch.arange(result.size(1), device=result.device)[None, :] + + for u in tqdm.tqdm( + range(0, result.size(1), self.world.it_len), + desc="thinking", + ): + # Generate the next state but keep the initial one, the + # lookahead_reward of previous iterations are set to + # UNKNOWN + if u > 0: + result[ + :, u + self.world.index_lookahead_reward + ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN) + ar_mask = (t >= u + self.world.index_states).long() * ( + t < u + self.world.index_states + self.world.state_len + ).long() + ar(result, ar_mask) + + # Generate the action and reward with lookahead_reward to +1 + result[ + :, u + self.world.index_lookahead_reward + ] = self.world.lookahead_reward2code(greed.REWARD_PLUS) + ar_mask = (t >= u + self.world.index_reward).long() * ( + t <= u + self.world.index_action + ).long() + ar(result, ar_mask) + + # Set the lookahead_reward to UNKNOWN for the next iterations + result[ + :, u + self.world.index_lookahead_reward + ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN) + + filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt") + with open(filename, "w") as f: + for n in range(snapshots[0].size(0)): + for s in snapshots: + lr, s, a, r = self.world.seq2episodes( + s[n : n + 1], + ) + str = self.world.episodes2str( + lr, s, a, r, unicode=True, ansi_colors=True + ) + f.write(str) + f.write("\n\n") + + # Saving the generated sequences + + lr, s, a, r = self.world.seq2episodes(result) + str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True) + + filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt") + with open(filename, "w") as f: + f.write(str) + logger(f"wrote {filename}") + + def produce_results( + self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000 + ): + result = self.wipe_lookahead_rewards(self.test_input[:250].clone()) + + # Saving the ground truth + + lr, s, a, r = self.world.seq2episodes( + result, + ) + str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True) + + filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt") + with open(filename, "w") as f: + f.write(str) + logger(f"wrote {filename}") + + # Re-generating from the first frame + + ar_mask = ( + torch.arange(result.size(1), device=result.device) >= self.world.it_len + ).long()[None, :] + ar_mask = ar_mask.expand_as(result) + result *= 1 - ar_mask # paraaaaanoiaaaaaaa + + masked_inplace_autoregression( + model, + self.batch_size, + result, + ar_mask, + deterministic_synthesis, + device=self.device, + ) + + # Saving the generated sequences + + lr, s, a, r = self.world.seq2episodes( + result, + ) + str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True) + + filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt") + with open(filename, "w") as f: + f.write(str) + logger(f"wrote {filename}") + + self.thinking_autoregression( + n_epoch, model, result_dir, logger, deterministic_synthesis, nmax + ) + + +######################################################################
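
For orientation, a minimal driver sketch for the Greed task added at the end of
this diff. Only the constructor signature, batches(), and vocabulary_size() are
taken from the code above; the hyperparameter values and the use of print as
logger are illustrative assumptions, not the settings used in the actual
experiments.

import torch
import tasks

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Illustrative hyperparameters only (assumed, not taken from the repository's
# argument defaults).
task = tasks.Greed(
    nb_train_samples=25000,
    nb_test_samples=1000,
    batch_size=25,
    height=5,
    width=7,
    T=25,
    nb_walls=5,
    nb_coins=2,
    logger=print,
    device=device,
)

print("vocabulary size:", task.vocabulary_size())

# Each yielded batch is a LongTensor of episode token ids in which the
# lookahead-reward tokens of a random-length prefix have been overwritten with
# the REWARD_UNKNOWN code (see wipe_lookahead_rewards in the diff above).
for batch in task.batches(split="train", nb_to_use=100, desc="sanity-check"):
    print("batch shape:", tuple(batch.size()))
    break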