X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=e1f619c03712395232847851ca168440131e68df;hb=02c4828834319a5b7818bafb8821fce66b3a1bb1;hp=319e94b1856f49f322d593bc902bebfc09a6a2d3;hpb=b5fd9b344c8c782460941c604b6e637d7549fe7d;p=picoclvr.git

diff --git a/main.py b/main.py
index 319e94b..e1f619c 100755
--- a/main.py
+++ b/main.py
@@ -32,7 +32,10 @@ parser = argparse.ArgumentParser(
 )
 
 parser.add_argument(
-    "--task", type=str, default="picoclvr", help="picoclvr, mnist, maze, snake, stack, expr"
+    "--task",
+    type=str,
+    default="picoclvr",
+    help="picoclvr, mnist, maze, snake, stack, expr",
 )
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
@@ -117,6 +120,13 @@ parser.add_argument("--stack_nb_digits", type=int, default=3)
 
 parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
 
+##############################
+# Expr options
+
+parser.add_argument("--expr_nb_variables", type=int, default=5)
+
+parser.add_argument("--expr_sequence_length", type=int, default=30)
+
 ######################################################################
 
 args = parser.parse_args()
@@ -160,10 +170,10 @@ default_args = {
         "nb_test_samples": 1000,
     },
     "expr": {
-        "nb_epochs": 5,
+        "nb_epochs": 50,
         "batch_size": 25,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
+        "nb_train_samples": 250000,
+        "nb_test_samples": 10000,
     },
 }
 
@@ -223,7 +233,6 @@ def masked_inplace_autoregression(
     progress_bar_desc="autoregression",
     device=torch.device("cpu"),
 ):
-
     batches = zip(input.split(batch_size), ar_mask.split(batch_size))
 
     if progress_bar_desc is not None:
@@ -1010,19 +1019,64 @@ class TaskExpr(Task):
         self,
         nb_train_samples,
         nb_test_samples,
+        nb_variables,
+        sequence_length,
         batch_size,
         device=torch.device("cpu"),
     ):
         self.batch_size = batch_size
         self.device = device
-        train_sequences = expr.generate_sequences(nb_train_samples)
-        test_sequences = expr.generate_sequences(nb_test_samples)
-        self.char2id = dict([ (c,n) for n,c in enumerate(set("".join(train_sequences + test_sequences))) ])
-        self.id2char = dict([ (n,c) for n,c in self.char2id.items() ])
-        len_max = max([len(x) for x in train_sequences + test_sequences])
-        self.train_input = torch.cat([torch.tensor([char2id(c) for c in s + " "*(len_max-len(s))] for s in train_sequences)], 0)
-        self.test_input = torch.cat([torch.tensor([char2id(c) for c in s + " "*(len_max-len(s))] for s in test_sequences)], 0)
+        train_sequences = expr.generate_sequences(
+            nb_train_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
+            # length=2 * sequence_length,
+            # randomize_length=True,
+        )
+        test_sequences = expr.generate_sequences(
+            nb_test_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
+        )
+        self.char2id = dict(
+            [
+                (c, n)
+                for n, c in enumerate(
+                    set("#" + "".join(train_sequences + test_sequences))
+                )
+            ]
+        )
+        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
+
+        self.filler, self.space = self.char2id["#"], self.char2id[" "]
+
+        len_max = max([len(x) for x in train_sequences])
+        self.train_input = torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+                        for s in train_sequences
+                    ]
+                )
+            ],
+            0,
+        ).to(device)
+
+        len_max = max([len(x) for x in test_sequences])
+        self.test_input = torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+                        for s in test_sequences
+                    ]
+                )
+            ],
+            0,
+        ).to(device)
+
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -1035,32 +1089,50 @@ class TaskExpr(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
+            if split == "train":
+                last = (batch != self.filler).max(0).values.nonzero().max() + 1
+                batch = batch[:, :last]
             yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
 
+    def seq2str(self, s):
+        return "".join([self.id2char[k.item()] for k in s])
+
     def produce_results(self, n_epoch, model):
-        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         with torch.autograd.no_grad():
             t = model.training
             model.eval()
 
             def compute_nb_correct(input):
                 result = input.clone()
-                stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-                ar_mask = (result != input).long()
+                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+                result = (1 - ar_mask) * result + ar_mask * self.filler
                 masked_inplace_autoregression(
                     model, self.batch_size, result, ar_mask, device=self.device
                 )
 
-                errors = ((result != input).long() * ar_mask).reshape(
-                    -1, 1 + self.nb_digits
+                nb_total = input.size(0)
+                nb_correct = (input == result).long().min(1).values.sum()
+
+                values_input = expr.extract_results([self.seq2str(s) for s in input])
+                max_input = max([max(x.values()) for x in values_input])
+                values_result = expr.extract_results([self.seq2str(s) for s in result])
+                max_result = max(
+                    [-1 if len(x) == 0 else max(x.values()) for x in values_result]
                 )
-                ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
-                nb_total = ar_mask.max(1).values.sum()
-                nb_correct = nb_total - errors.max(1).values.sum()
+                nb_missing, nb_predicted = torch.zeros(max_input + 1), torch.zeros(
+                    max_input + 1, max_result + 1
+                )
+                for i, r in zip(values_input, values_result):
+                    for n, vi in i.items():
+                        vr = r.get(n)
+                        if vr is None or vr < 0:
+                            nb_missing[vi] += 1
+                        else:
+                            nb_predicted[vi, vr] += 1
 
                 return nb_total, nb_correct
 
@@ -1072,21 +1144,20 @@ class TaskExpr(Task):
             ##############################################################
             # Log a few generated sequences
 
-            input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+            input = self.test_input[:10]
             result = input.clone()
-            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-            ar_mask = (result != input).long()
+            ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.filler
 
             for n in range(result.size(0)):
-                log_string(
-                    f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
+                log_string(f"test_before {self.seq2str(result[n])}")
 
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, device=self.device
             )
 
+            correct = (1 - ar_mask) * self.space + ar_mask * input
             for n in range(result.size(0)):
-                log_string(
-                    f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
+                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+                log_string(f"test_after {self.seq2str(result[n])} {comment}")
+                log_string(f"correct {self.seq2str(correct[n])}")
 
             ##############################################################
 
             model.train(t)
@@ -1172,6 +1243,8 @@ elif args.task == "expr":
     task = TaskExpr(
         nb_train_samples=args.nb_train_samples,
         nb_test_samples=args.nb_test_samples,
+        nb_variables=args.expr_nb_variables,
+        sequence_length=args.expr_sequence_length,
         batch_size=args.batch_size,
         device=device,
     )