X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=9dee679fbf1bdcda0faac54cc77179072c4ad0a4;hb=ca5b98d1517b8ce2367887bbad2205f27d55e0b3;hp=beafc19e55ecd1c89c9d43fadf2a9e93a6cd44a6;hpb=f29d0fa816414f74efed3b9ccdad56fdbd346298;p=picoclvr.git

diff --git a/main.py b/main.py
index beafc19..9dee679 100755
--- a/main.py
+++ b/main.py
@@ -170,10 +170,10 @@ default_args = {
         "nb_test_samples": 1000,
     },
     "expr": {
-        "nb_epochs": 5,
+        "nb_epochs": 50,
         "batch_size": 25,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
+        "nb_train_samples": 250000,
+        "nb_test_samples": 10000,
     },
 }
 
@@ -1028,10 +1028,16 @@ class TaskExpr(Task):
         self.device = device
 
         train_sequences = expr.generate_sequences(
-            nb_train_samples, nb_variables=nb_variables, length=sequence_length
+            nb_train_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
+            # length=2 * sequence_length,
+            # randomize_length=True,
         )
         test_sequences = expr.generate_sequences(
-            nb_test_samples, nb_variables=nb_variables, length=sequence_length
+            nb_test_samples,
+            nb_variables=nb_variables,
+            length=sequence_length,
         )
         self.char2id = dict(
             [
@@ -1042,7 +1048,10 @@ class TaskExpr(Task):
             ]
         )
         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
-        len_max = max([len(x) for x in train_sequences + test_sequences])
+
+        self.filler, self.space = self.char2id["#"], self.char2id[" "]
+
+        len_max = max([len(x) for x in train_sequences])
         self.train_input = torch.cat(
             [
                 torch.tensor(
@@ -1054,6 +1063,8 @@ class TaskExpr(Task):
             ],
             0,
         ).to(device)
+
+        len_max = max([len(x) for x in test_sequences])
         self.test_input = torch.cat(
             [
                 torch.tensor(
@@ -1065,6 +1076,7 @@ class TaskExpr(Task):
             ],
             0,
         ).to(device)
+
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -1077,11 +1089,17 @@ class TaskExpr(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
+            if split == "train":
+                last = (batch != self.filler).max(0).values.nonzero().max() + 1
+                batch = batch[:, :last]
             yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
 
+    def seq2str(self, s):
+        return "".join([self.id2char[k.item()] for k in s])
+
     def produce_results(self, n_epoch, model):
         with torch.autograd.no_grad():
             t = model.training
@@ -1089,40 +1107,76 @@ class TaskExpr(Task):
 
             def compute_nb_correct(input):
                 result = input.clone()
-                filler, space = self.char2id["#"], self.char2id[" "]
-                ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
-                result = (1 - ar_mask) * result + filler * ar_mask
+                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+                result = (1 - ar_mask) * result + ar_mask * self.filler
                 masked_inplace_autoregression(
                     model, self.batch_size, result, ar_mask, device=self.device
                 )
 
-                nb_total = ar_mask.sum()
-                nb_correct = ((input == result).long() * ar_mask).sum()
+                nb_total = input.size(0)
+                nb_correct = (input == result).long().min(1).values.sum()
 
-                return nb_total, nb_correct
+                #######################################################################
+                # Compute predicted vs. true variable values
 
-        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
+                nb_delta = torch.zeros(5, dtype=torch.int64)
+                nb_missed = 0
+
+                values_input = expr.extract_results([self.seq2str(s) for s in input])
+                values_result = expr.extract_results([self.seq2str(s) for s in result])
+
+                for i, r in zip(values_input, values_result):
+                    for n, vi in i.items():
+                        vr = r.get(n)
+                        if vr is None or vr < 0:
+                            nb_missed += 1
+                        else:
+                            d = abs(vr - vi)
+                            if d >= nb_delta.size(0):
+                                nb_missed += 1
+                            else:
+                                nb_delta[d] += 1
+
+                ######################################################################
+
+                return nb_total, nb_correct, nb_delta, nb_missed
+
+            (
+                test_nb_total,
+                test_nb_correct,
+                test_nb_delta,
+                test_nb_missed,
+            ) = compute_nb_correct(self.test_input[:1000])
 
             log_string(
                 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
             )
 
+            nb_total = test_nb_delta.sum() + test_nb_missed
+            for d in range(test_nb_delta.size(0)):
+                log_string(
+                    f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
+                )
+            log_string(
+                f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
+            )
+
             ##############################################################
             # Log a few generated sequences
             input = self.test_input[:10]
             result = input.clone()
-            filler, space = self.char2id["#"], self.char2id[" "]
-            ar_mask = (result == space).long().cumsum(dim=1).clamp(max=1)
-            result = (1 - ar_mask) * result + filler * ar_mask
+            ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.filler
             for n in range(result.size(0)):
-                s = "".join([self.id2char[k.item()] for k in result[n]])
-                log_string(f"test_before {s}")
+                log_string(f"test_before {self.seq2str(result[n])}")
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, device=self.device
             )
+            correct = (1 - ar_mask) * self.space + ar_mask * input
             for n in range(result.size(0)):
-                s = "".join([self.id2char[k.item()] for k in result[n]])
-                log_string(f"test_after {s}")
+                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+                log_string(f"test_after {self.seq2str(result[n])} {comment}")
+                log_string(f"correct {self.seq2str(correct[n])}")
             ##############################################################
 
             model.train(t)
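
Note on the cropping added to batches(): for the train split, the patch trims every batch to the last column that still contains a non-filler token, so no compute is spent on columns that are padding across the whole batch. Below is a minimal, self-contained sketch of that logic, assuming only that filler marks the padding appended to each sequence; the token ids are made up for illustration.

import torch

filler = 0
batch = torch.tensor(
    [
        [5, 7, 2, filler, filler],
        [4, 1, filler, filler, filler],
    ]
)

# (batch != filler).max(0).values flags every column holding at least one
# non-filler token; nonzero().max() + 1 is the exclusive index of the last
# such column, so the slice drops the all-filler tail.
last = (batch != filler).max(0).values.nonzero().max() + 1
batch = batch[:, :last]
print(batch.size())  # torch.Size([2, 3])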
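Note on the new error accounting in compute_nb_correct(): besides counting fully correct sequences, the patch compares the variable values parsed from the ground truth against those parsed from the model's completion, bucketing absolute errors 0..4 in nb_delta and lumping missing, negative, or larger predictions into nb_missed. A minimal sketch of that bookkeeping, assuming expr.extract_results yields one name-to-value dict per sequence (which is what the .items()/.get() usage in the patch implies); the two dicts below are made-up stand-ins for its output.

import torch

nb_delta = torch.zeros(5, dtype=torch.int64)  # histogram of |predicted - true|
nb_missed = 0

values_input = [{"A": 12, "B": 3}]  # parsed from the ground-truth sequence
values_result = [{"A": 11}]         # parsed from the completion, "B" absent

for i, r in zip(values_input, values_result):
    for n, vi in i.items():
        vr = r.get(n)
        if vr is None or vr < 0:
            nb_missed += 1          # variable missing or value negative
        else:
            d = abs(vr - vi)
            if d >= nb_delta.size(0):
                nb_missed += 1      # error outside the histogram range
            else:
                nb_delta[d] += 1    # prediction off by exactly d

print(nb_delta.tolist(), nb_missed)  # [0, 1, 0, 0, 0] 1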