- for n in range(result.size(0)):
- logger(
- f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
- )
- ##############################################################
-
- model.train(t)
-
-
-######################################################################
-
-
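-# expr is a companion module; as used below, it provides
-# generate_sequences() and extract_results().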
-import expr
-
-
-class Expr(Task):
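-    # Pad every sequence with the filler '#' to the length of the
-    # longest one, and encode each character to its token id.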
-    def tensorize(self, sequences):
-        len_max = max(len(x) for x in sequences)
-        return torch.tensor(
-            [
-                [self.char2id[c] for c in s + "#" * (len_max - len(s))]
-                for s in sequences
-            ]
-        ).to(self.device)
-
- def __init__(
- self,
- nb_train_samples,
- nb_test_samples,
- nb_variables,
- sequence_length,
- batch_size,
- device=torch.device("cpu"),
- ):
- self.batch_size = batch_size
- self.device = device
-
- train_sequences = expr.generate_sequences(
- nb_train_samples,
- nb_variables=nb_variables,
- length=sequence_length,
- # length=2 * sequence_length,
- # randomize_length=True,
- )
- test_sequences = expr.generate_sequences(
- nb_test_samples,
- nb_variables=nb_variables,
- length=sequence_length,
- )
-
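-        # The vocabulary is every character occurring in the data, plus
-        # the '#' filler.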
-        symbols = sorted(set("#" + "".join(train_sequences + test_sequences)))
-
-        self.char2id = {c: n for n, c in enumerate(symbols)}
-        self.id2char = {n: c for c, n in self.char2id.items()}
-
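-        # Token ids of the padding filler and of the space from which
-        # autoregressive prediction starts (see produce_results).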
- self.filler, self.space = self.char2id["#"], self.char2id[" "]
-
- self.train_input = self.tensorize(train_sequences)
- self.test_input = self.tensorize(test_sequences)
-
- self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
-
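-    # Yield mini-batches from the chosen split, optionally restricted
-    # to the first nb_to_use sequences.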
- def batches(self, split="train", nb_to_use=-1, desc=None):
- assert split in {"train", "test"}
- input = self.train_input if split == "train" else self.test_input
- if nb_to_use > 0:
- input = input[:nb_to_use]
- if desc is None:
- desc = f"epoch-{split}"
- for batch in tqdm.tqdm(
- input.split(self.batch_size), dynamic_ncols=True, desc=desc
- ):
- if split == "train":
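-                # Crop training batches at the last non-filler column
-                # (plus a small margin) to save compute on padding.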
- last = (batch != self.filler).max(0).values.nonzero().max() + 3
- batch = batch[:, :last]
- yield batch
-
- def vocabulary_size(self):
- return self.nb_codes
-
- def seq2str(self, s):
- return "".join([self.id2char[k.item()] for k in s])
-
- def produce_results(
- self,
- n_epoch,
- model,
- result_dir,
- logger,
- deterministic_synthesis,
- input_file=None,
- ):
-        with torch.no_grad():
- t = model.training
- model.eval()
-
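-            # Blank out everything from the first space onward, let the
-            # model regenerate it, and compare to the ground truth.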
- def compute_nb_correct(input):
- result = input.clone()
- ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
- result = (1 - ar_mask) * result + ar_mask * self.filler
- masked_inplace_autoregression(
- model,
- self.batch_size,
- result,
- ar_mask,
- deterministic_synthesis,
- device=self.device,
- )
-
- nb_total = input.size(0)
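-                # A sequence is correct only if every token matches.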
- nb_correct = (input == result).long().min(1).values.sum()
-
- #######################################################################
-            # Compute predicted vs. true variable values
-
- nb_delta = torch.zeros(5, dtype=torch.int64)
- nb_missed = 0
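-                # nb_delta[d] counts predicted values off by exactly d;
-                # absent, negative, or too-far-off values are "missed".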
-
- values_input = expr.extract_results([self.seq2str(s) for s in input])
- values_result = expr.extract_results([self.seq2str(s) for s in result])
-
- for i, r in zip(values_input, values_result):
- for n, vi in i.items():
- vr = r.get(n)
- if vr is None or vr < 0:
- nb_missed += 1
- else:
- d = abs(vr - vi)
- if d >= nb_delta.size(0):
- nb_missed += 1
- else:
- nb_delta[d] += 1
-
- ######################################################################
-
- return nb_total, nb_correct, nb_delta, nb_missed
-
- (
- test_nb_total,
- test_nb_correct,
- test_nb_delta,
- test_nb_missed,
- ) = compute_nb_correct(self.test_input[:1000])
-
- logger(
- f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
- )
-
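-            # Distribution of |predicted - true| over the extracted
-            # variable values.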
- nb_total = test_nb_delta.sum() + test_nb_missed
- for d in range(test_nb_delta.size(0)):
- logger(
- f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
- )
- logger(
- f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"