X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=324aeba663a5b4c7453ce03ff5deb7062e1da7e3;hb=fd2166de6350fc3f2b3fdb90849115574e3ae843;hp=319e94b1856f49f322d593bc902bebfc09a6a2d3;hpb=b5fd9b344c8c782460941c604b6e637d7549fe7d;p=picoclvr.git

diff --git a/main.py b/main.py
index 319e94b..324aeba 100755
--- a/main.py
+++ b/main.py
@@ -32,7 +32,10 @@ parser = argparse.ArgumentParser(
 )
 
 parser.add_argument(
-    "--task", type=str, default="picoclvr", help="picoclvr, mnist, maze, snake, stack, expr"
+    "--task",
+    type=str,
+    default="picoclvr",
+    help="picoclvr, mnist, maze, snake, stack, expr",
 )
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
@@ -223,7 +226,6 @@ def masked_inplace_autoregression(
     progress_bar_desc="autoregression",
     device=torch.device("cpu"),
 ):
-
     batches = zip(input.split(batch_size), ar_mask.split(batch_size))
 
     if progress_bar_desc is not None:
@@ -1018,11 +1020,36 @@ class TaskExpr(Task):
         train_sequences = expr.generate_sequences(nb_train_samples)
         test_sequences = expr.generate_sequences(nb_test_samples)
 
-        self.char2id = dict([ (c,n) for n,c in enumerate(set("".join(train_sequences + test_sequences))) ])
-        self.id2char = dict([ (n,c) for n,c in self.char2id.items() ])
+        self.char2id = dict(
+            [
+                (c, n)
+                for n, c in enumerate(set("#"+"".join(train_sequences + test_sequences)))
+            ]
+        )
+        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
         len_max = max([len(x) for x in train_sequences + test_sequences])
-        self.train_input = torch.cat([torch.tensor([char2id(c) for c in s + " "*(len_max-len(s))] for s in train_sequences)], 0)
-        self.test_input = torch.cat([torch.tensor([char2id(c) for c in s + " "*(len_max-len(s))] for s in test_sequences)], 0)
+        self.train_input = torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+                        for s in train_sequences
+                    ]
+                )
+            ],
+            0,
+        ).to(device)
+        self.test_input = torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+                        for s in test_sequences
+                    ]
+                )
+            ],
+            0,
+        ).to(device)
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -1041,26 +1068,21 @@ class TaskExpr(Task):
         return self.nb_codes
 
     def produce_results(self, n_epoch, model):
-        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         with torch.autograd.no_grad():
             t = model.training
             model.eval()
 
             def compute_nb_correct(input):
                 result = input.clone()
-                stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-                ar_mask = (result != input).long()
+                space = self.char2id["#"]
+                ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
+                result = (1 - ar_mask) * result + space * ar_mask
                 masked_inplace_autoregression(
                     model, self.batch_size, result, ar_mask, device=self.device
                 )
 
-                errors = ((result != input).long() * ar_mask).reshape(
-                    -1, 1 + self.nb_digits
-                )
-                ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
-
-                nb_total = ar_mask.max(1).values.sum()
-                nb_correct = nb_total - errors.max(1).values.sum()
+                nb_total = ar_mask.sum()
+                nb_correct = ((input == result).long() * ar_mask).sum()
 
                 return nb_total, nb_correct
 
@@ -1072,21 +1094,20 @@ class TaskExpr(Task):
 
             ##############################################################
             # Log a few generated sequences
-            input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+            input = self.test_input[:10]
             result = input.clone()
-            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-            ar_mask = (result != input).long()
+            space = self.char2id["#"]
+            ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
+            result = (1 - ar_mask) * result + space * ar_mask
             for n in range(result.size(0)):
-                log_string(
-                    f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
+                s = "".join([self.id2char[k.item()] for k in result[n]])
+                log_string(f"test_before {s}")
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, device=self.device
             )
             for n in range(result.size(0)):
-                log_string(
-                    f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
+                s = "".join([self.id2char[k.item()] for k in result[n]])
+                log_string(f"test_after {s}")
             ##############################################################
 
             model.train(t)
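
Note on the masking idiom added in compute_nb_correct: the sketch below is not
part of the commit; it replays the three "+" lines on a made-up vocabulary and
tensors (illustrative assumptions, not repository data) to show what the
cumsum/clamp construction computes.

    import torch

    # Hypothetical encoding; in the patch, char2id is built from the data,
    # with "#" prepended to the character set as the padding symbol.
    char2id = {"#": 0, "1": 1, "+": 2, "=": 3}
    space = char2id["#"]

    # Two encoded sequences padded with "#" to the same length,
    # e.g. "1+1=##" and "1=1###".
    input = torch.tensor(
        [
            [1, 2, 1, 3, 0, 0],
            [1, 3, 1, 0, 0, 0],
        ]
    )
    result = input.clone()

    # (result == space) flags every "#"; cumsum makes the flag sticky from
    # the first "#" onward; clamp caps the running count at 1. The mask is
    # therefore 1 on every position from the first padding character on.
    ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
    # tensor([[0, 0, 0, 0, 1, 1],
    #         [0, 0, 0, 1, 1, 1]])

    # Masked positions are reset to the padding id before
    # masked_inplace_autoregression fills them in.
    result = (1 - ar_mask) * result + space * ar_mask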