X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=dba6e13c5414408abeb7fa18ec131fe7756f999a;hb=9664af37378218468190741c9ea5c3d7cb231926;hp=78910a06d7a24977b462af0836a9219073381507;hpb=b35745d09b33aed20670ecb96726f89206487a24;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 78910a0..dba6e13 100755
--- a/tasks.py
+++ b/tasks.py
@@ -71,7 +71,7 @@ class Task:
 
 
 class TaskFromFile(Task):
-    def tensorize(self, pairs):
+    def tensorize(self, pairs, shuffle):
         len_max = max([len(x[0]) for x in pairs])
 
         input = torch.cat(
@@ -98,6 +98,11 @@ class TaskFromFile(Task):
             0,
         ).to("cpu")
 
+        if shuffle:
+            i = torch.randperm(input.size(0))
+            input = input[i].contiguous()
+            pred_mask = pred_mask[i].contiguous()
+
         return input, pred_mask
 
     # trim all the tensors in the tuple z to remove as much token from
@@ -117,32 +122,52 @@ class TaskFromFile(Task):
 
     def __init__(
         self,
-        filename,
+        train_filename,
+        test_filename,
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        shuffle=False,
         device=torch.device("cpu"),
     ):
         self.batch_size = batch_size
         self.device = device
 
-        pairs = []
-        with open(filename, "r") as f:
-            for _ in range(nb_train_samples + nb_test_samples):
-                sequence = f.readline().strip()
-                pred_mask = f.readline().strip()
-                assert len(sequence) == len(pred_mask)
-                assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
-                pairs.append((sequence, pred_mask))
-
-        symbols = ["#"] + list(set("".join([x[0] for x in pairs])) - set(["#"]))
+        def read_file(filename, nb=-1):
+            pairs = []
+            with open(filename, "r") as f:
+                while True:
+                    sequence = f.readline().strip()
+                    if not sequence:
+                        break
+                    pred_mask = f.readline().strip()
+                    assert len(sequence) == len(pred_mask)
+                    assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
+                    pairs.append((sequence, pred_mask))
+                    if len(pairs) == nb:
+                        break
+
+            if nb > 0:
+                pairs = pairs[:nb]
+                assert len(pairs) == nb
+
+            return pairs
+
+        train_pairs = read_file(train_filename, nb_train_samples)
+        test_pairs = read_file(test_filename, nb_test_samples)
+
+        symbols = ["#"] + list(
+            set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
+        )
         self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
 
         self.train_input, self.train_pred_masks = self.tensorize(
-            pairs[:nb_train_samples]
+            train_pairs, shuffle=shuffle
+        )
+        self.test_input, self.test_pred_masks = self.tensorize(
+            test_pairs, shuffle=shuffle
         )
-        self.test_input, self.test_pred_masks = self.tensorize(pairs[nb_train_samples:])
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
@@ -173,7 +198,7 @@ class TaskFromFile(Task):
 
         logger(f"----------------------------------------------------------")
 
-        for e in self.tensor2str(result[:10]):
+        for e in self.tensor2str(result[:50]):
             logger(f"test_before {e}")
 
         masked_inplace_autoregression(
@@ -187,7 +212,7 @@ class TaskFromFile(Task):
 
         logger(f"----------------------------------------------------------")
 
-        for e, c in zip(self.tensor2str(result[:10]), self.tensor2str(correct[:10])):
+        for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
             logger(f"test_after  {e}")
             logger(f"correct     {c}")
 
@@ -1836,3 +1861,86 @@ class QMLP(Task):
 
 
 ######################################################################
+
+import escape
+
+
+class Escape(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        height,
+        width,
+        T,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = height
+        self.width = width
+
+        states, actions, rewards = escape.generate_episodes(
+            nb_train_samples + nb_test_samples, height, width, T
+        )
+        seq = escape.episodes2seq(states, actions, rewards)
+        self.train_input = seq[:nb_train_samples].to(self.device)
+        self.test_input = seq[nb_train_samples:].to(self.device)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+        # if logger is not None:
+        #     for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
+        #         logger(f"train_sequences {self.problem.seq2str(s)}")
+        #         a = "".join(["01"[x.item()] for x in a])
+        #         logger(f"                {a}")
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:100].clone()
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.height * self.width + 2
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        s, a, r = escape.seq2episodes(result, self.height, self.width)
+        str = escape.episodes2str(s, a, r, unicode=True, ansi_colors=True)
+
+        filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(str)
+        logger(f"wrote {filename}")
+
+
+######################################################################
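
Usage note (not part of the commit above): a minimal sketch of constructing
the reworked TaskFromFile and iterating over its batches. The file paths,
sample counts, and the two-line data example are hypothetical; as read_file
enforces, each input file alternates a sequence line with a same-length
prediction-mask line whose characters are drawn from {"0", "1", "2"}.

    import torch

    from tasks import TaskFromFile

    # Hypothetical data files: each sample is two lines, e.g.
    #   abcde   <- sequence
    #   00011   <- prediction mask, same length, over {"0", "1", "2"}
    task = TaskFromFile(
        train_filename="train.txt",  # hypothetical path
        test_filename="test.txt",  # hypothetical path
        nb_train_samples=10000,
        nb_test_samples=1000,
        batch_size=25,
        shuffle=True,  # new flag: permutes samples after tensorization
        device=torch.device("cpu"),
    )

    for batch in task.batches(split="train"):
        pass  # feed the batch of token ids to the model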