X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=35bf02c0123815b31d479df63f935839c7523b33;hb=02b0a7bb770f07f2e91f1c77b899815516087b6a;hp=324aeba663a5b4c7453ce03ff5deb7062e1da7e3;hpb=fd2166de6350fc3f2b3fdb90849115574e3ae843;p=picoclvr.git

diff --git a/main.py b/main.py
index 324aeba..35bf02c 100755
--- a/main.py
+++ b/main.py
@@ -120,6 +120,13 @@ parser.add_argument("--stack_nb_digits", type=int, default=3)
 
 parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
 
+##############################
+# Expr options
+
+parser.add_argument("--expr_nb_variables", type=int, default=5)
+
+parser.add_argument("--expr_sequence_length", type=int, default=30)
+
 ######################################################################
 
 args = parser.parse_args()
@@ -163,10 +170,10 @@ default_args = {
         "nb_test_samples": 1000,
     },
     "expr": {
-        "nb_epochs": 5,
+        "nb_epochs": 50,
         "batch_size": 25,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
+        "nb_train_samples": 250000,
+        "nb_test_samples": 10000,
     },
 }
 
@@ -1012,22 +1019,38 @@ class TaskExpr(Task):
         self,
         nb_train_samples,
         nb_test_samples,
+        nb_variables,
+        sequence_length,
         batch_size,
         device=torch.device("cpu"),
     ):
         self.batch_size = batch_size
         self.device = device
 
-        train_sequences = expr.generate_sequences(nb_train_samples)
-        test_sequences = expr.generate_sequences(nb_test_samples)
+        train_sequences = expr.generate_sequences(
+            nb_train_samples,
+            nb_variables=nb_variables,
+            length=2 * sequence_length,
+            randomize_length=True,
+        )
+        test_sequences = expr.generate_sequences(
+            nb_test_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
+        )
         self.char2id = dict(
             [
                 (c, n)
-                for n, c in enumerate(set("#"+"".join(train_sequences + test_sequences)))
+                for n, c in enumerate(
+                    set("#" + "".join(train_sequences + test_sequences))
+                )
             ]
         )
         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
-        len_max = max([len(x) for x in train_sequences + test_sequences])
+
+        self.filler, self.space = self.char2id["#"], self.char2id[" "]
+
+        len_max = max([len(x) for x in train_sequences])
         self.train_input = torch.cat(
             [
                 torch.tensor(
@@ -1039,6 +1062,8 @@ class TaskExpr(Task):
             ],
             0,
         ).to(device)
+
+        len_max = max([len(x) for x in test_sequences])
         self.test_input = torch.cat(
             [
                 torch.tensor(
@@ -1050,6 +1075,7 @@ class TaskExpr(Task):
             ],
             0,
         ).to(device)
+
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -1062,11 +1088,17 @@ class TaskExpr(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
+            if split == "train":
+                last = (batch != self.filler).max(0).values.nonzero().max() + 1
+                batch = batch[:, :last]
             yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
 
+    def seq2str(self, s):
+        return "".join([self.id2char[k.item()] for k in s])
+
     def produce_results(self, n_epoch, model):
         with torch.autograd.no_grad():
             t = model.training
@@ -1074,15 +1106,14 @@ class TaskExpr(Task):
 
             def compute_nb_correct(input):
                 result = input.clone()
-                space = self.char2id["#"]
-                ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
-                result = (1 - ar_mask) * result + space * ar_mask
+                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+                result = (1 - ar_mask) * result + ar_mask * self.filler
                 masked_inplace_autoregression(
                     model, self.batch_size, result, ar_mask, device=self.device
                 )
-                nb_total = ar_mask.sum()
-                nb_correct = ((input == result).long() * ar_mask).sum()
+                nb_total = input.size(0)
+                nb_correct = (input == result).long().min(1).values.sum()
 
                 return nb_total, nb_correct
 
@@ -1096,18 +1127,18 @@ class TaskExpr(Task):
             # Log a few generated sequences
             input = self.test_input[:10]
             result = input.clone()
-            space = self.char2id["#"]
-            ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
-            result = (1 - ar_mask) * result + space * ar_mask
+            ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.filler
             for n in range(result.size(0)):
-                s = "".join([self.id2char[k.item()] for k in result[n]])
-                log_string(f"test_before {s}")
+                log_string(f"test_before {self.seq2str(result[n])}")
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, device=self.device
             )
+            correct = (1 - ar_mask) * self.space + ar_mask * input
             for n in range(result.size(0)):
-                s = "".join([self.id2char[k.item()] for k in result[n]])
-                log_string(f"test_after {s}")
+                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+                log_string(f"test_after {self.seq2str(result[n])} {comment}")
+                log_string(f"correct {self.seq2str(correct[n])}")
 
             ##############################################################
 
             model.train(t)
@@ -1193,6 +1224,8 @@ elif args.task == "expr":
     task = TaskExpr(
         nb_train_samples=args.nb_train_samples,
         nb_test_samples=args.nb_test_samples,
+        nb_variables=args.expr_nb_variables,
+        sequence_length=args.expr_sequence_length,
        batch_size=args.batch_size,
        device=device,
    )