X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=82d965b040becefc4f4933e055fc9e19d3a6976e;hb=994d2408781ebaed6da16b10b2b3ebedeff82756;hp=0f3aaec3ff480ef8209e262baa61e150d23f4be5;hpb=68aa86a6645dfef3f919aad5732a1a09db77bfae;p=culture.git
diff --git a/tasks.py b/tasks.py
index 0f3aaec..82d965b 100755
--- a/tasks.py
+++ b/tasks.py
@@ -30,10 +30,19 @@ def masked_inplace_autoregression(
             total=input.size(0) // batch_size,
         )
-    for input, ar_mask in batches:
-        model.masked_inplace_autoregression(
-            input, ar_mask, forbidden_tokens, deterministic_synthesis
-        )
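+    # Generation is pure inference: disable autograd and run the model in
+    # eval mode, restoring its initial training state afterward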
+    with torch.autograd.no_grad():
+        t = model.training
+        model.eval()
+
+        for input, ar_mask in batches:
+            model.masked_inplace_autoregression(
+                input, ar_mask, forbidden_tokens, deterministic_synthesis
+            )
+
+        model.train(t)
+
+
+######################################################################
 class Task:
@@ -82,86 +91,6 @@ class PicoCLVR(Task):
             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
             return z[:, a:b]
-    ######################
-    # Not the cleanest part of the code
-
-    # Extract the last image of each sequence, from the last
-    # <img> included, and set to <nul> all the tokens from the beginning of
-    # that image to the end
-    def excise_last_image(self, input):
-        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
-        nb_img_tokens = self.height * self.width + 1
-
-        input = input.clone()
-        t = (input == t_img).long()
-        tail_masks = (t.cumsum(dim=1) == t.sum(dim=1, keepdim=True)).long()
-        i = (t * tail_masks).nonzero(as_tuple=True)
-        j = (
-            i[0][:, None],
-            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
-        )
-        images = self.trim(input[j])
-        input[j] = t_nul
-        loss_masks = 1 - tail_masks
-        input, loss_masks = self.trim((input, loss_masks))
-        return input, loss_masks, images
-
-    def add_true_image(self, input, images, loss_masks):
-        t_nul = self.token2id["<nul>"]
-        nb_img_tokens = self.height * self.width + 1
-        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
-        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
-        t = (input == t_nul).long()
-        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
-        j = (
-            i[0][:, None],
-            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
-        )
-        input[j] = images
-        loss_masks[j] = 1
-        input, loss_masks = self.trim((input, loss_masks))
-        return input, loss_masks
-
-    def add_generated_image(self, input, loss_masks, model, deterministic_synthesis):
-        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
-        nb_img_tokens = self.height * self.width + 1
-
-        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
-        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
-        t = (input == t_nul).long()
-        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
-        input[i] = t_img
-
-        j = (
-            i[0][:, None],
-            i[1][:, None]
-            + 1
-            + torch.arange(nb_img_tokens - 1, device=input.device)[None, :],
-        )
-        ar_masks = input.new_zeros(input.size(), dtype=torch.int64)
-        ar_masks[j] = 1
-        forbidden_tokens = (
-            torch.arange(self.vocabulary_size(), device=input.device) == t_nul
-        )
-        with torch.autograd.no_grad():
-            t = model.training
-            model.eval()
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                input,
-                ar_masks,
-                deterministic_synthesis,
-                forbidden_tokens,
-                progress_bar_desc=None,
-                device=self.device,
-            )
-            model.train(t)
-
-        input, loss_masks = self.trim((input, loss_masks))
-
-        return input, loss_masks
-
     ######################
     def __init__(
@@ -193,16 +122,6 @@ class PicoCLVR(Task):
         self.pruner_train = pruner_train
         self.pruner_eval = pruner_eval
-        param = {
-            "nb_train_samples": nb_train_samples,
-            "nb_test_samples": nb_test_samples,
-            "height": height,
-            "width": width,
-            "nb_colors": nb_colors,
-            "batch_size": batch_size,
-            "rng_state": list(torch.get_rng_state()),
-        }
-
         if logger is not None:
             logger(
                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
@@ -225,6 +144,7 @@ class PicoCLVR(Task):
         tokens.sort()
         self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
         self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
+        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
         # Tokenize the train and test sets
         self.train_input = self.tensorize(self.train_descr)
@@ -253,11 +173,20 @@ class PicoCLVR(Task):
             dynamic_ncols=True,
             desc=f"test-properties",
         ):
-            tape, loss_masks, _ = self.excise_last_image(input)
-            tape, loss_masks = self.add_generated_image(
-                tape, loss_masks, model, deterministic_synthesis
+            result = input.clone()
+            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.t_nul
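+            # everything from the first <img> token on is now <nul>; let the
+            # model regenerate the image part autoregressively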
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                progress_bar_desc=None,
+                device=self.device,
             )
-            result_descr = self.detensorize(tape)
+
+            result_descr = self.detensorize(result)
             np = picoclvr.nb_properties(
                 result_descr,
                 height=self.height,
@@ -302,14 +231,23 @@ class PicoCLVR(Task):
"red below yellow yellow below green green below blue red right yellow left green right blue left",
"green bottom yellow bottom green left of blue yellow right of blue blue top",
]:
- primer += [primer_descr] * nb_per_primer
+ primer += [primer_descr + " "] * nb_per_primer
-        tape = self.tensorize(primer)
-        loss_masks = 1 - (tape == self.token2id["<nul>"]).long()
-        tape, loss_masks = self.add_generated_image(
-            tape, loss_masks, model, deterministic_synthesis
+        result = self.tensorize(primer)
+        fill = result.new_full(
+            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
         )
-        result_descr = self.detensorize(tape)
+        result = torch.cat((result, fill), 1)
+        ar_mask = (result == self.t_nul).long()
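+        # all <nul> filler positions are masked and thus generated; the
+        # textual primer itself stays fixed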
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+        result_descr = self.detensorize(result)
         np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
@@ -518,70 +456,64 @@ class Maze(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis
     ):
-        with torch.autograd.no_grad():
-            t = model.training
-            model.eval()
-
-            train_nb_total, train_nb_correct, count = self.compute_error(
-                model,
-                "train",
-                nb_to_use=1000,
-                deterministic_synthesis=deterministic_synthesis,
-            )
-            logger(
-                f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
-            )
-
-            test_nb_total, test_nb_correct, count = self.compute_error(
-                model,
-                "test",
-                nb_to_use=1000,
-                deterministic_synthesis=deterministic_synthesis,
-            )
-            logger(
-                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-            )
+        train_nb_total, train_nb_correct, count = self.compute_error(
+            model,
+            "train",
+            nb_to_use=1000,
+            deterministic_synthesis=deterministic_synthesis,
+        )
+        logger(
+            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+        )
-            if count is not None:
-                proportion_optimal = count.diagonal().sum().float() / count.sum()
-                logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
-                with open(
-                    os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
-                ) as f:
-                    for i in range(count.size(0)):
-                        for j in range(count.size(1)):
-                            eol = " " if j < count.size(1) - 1 else "\n"
-                            f.write(f"{count[i,j]}{eol}")
-
-            input = self.test_input[:48]
-            result = input.clone()
-            ar_mask = result.new_zeros(result.size())
-            ar_mask[:, self.height * self.width :] = 1
-            result *= 1 - ar_mask
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
+        test_nb_total, test_nb_correct, count = self.compute_error(
+            model,
+            "test",
+            nb_to_use=1000,
+            deterministic_synthesis=deterministic_synthesis,
+        )
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
-            mazes, paths = self.seq2map(input)
-            _, predicted_paths = self.seq2map(result)
-
-            filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
-            maze.save_image(
-                filename,
-                mazes=mazes,
-                target_paths=paths,
-                predicted_paths=predicted_paths,
-                path_correct=maze.path_correctness(mazes, predicted_paths),
-                path_optimal=maze.path_optimality(paths, predicted_paths),
-            )
-            logger(f"wrote {filename}")
+        if count is not None:
+            proportion_optimal = count.diagonal().sum().float() / count.sum()
+            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
+            with open(
+                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
+            ) as f:
+                for i in range(count.size(0)):
+                    for j in range(count.size(1)):
+                        eol = " " if j < count.size(1) - 1 else "\n"
+                        f.write(f"{count[i,j]}{eol}")
+
+        input = self.test_input[:48]
+        result = input.clone()
+        ar_mask = result.new_zeros(result.size())
+        ar_mask[:, self.height * self.width :] = 1
+        result *= 1 - ar_mask
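+        # the first height*width tokens encode the maze itself; the path
+        # that follows is erased and regenerated by the model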
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
-            model.train(t)
+        mazes, paths = self.seq2map(input)
+        _, predicted_paths = self.seq2map(result)
+
+        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
+        maze.save_image(
+            filename,
+            mazes=mazes,
+            target_paths=paths,
+            predicted_paths=predicted_paths,
+            path_correct=maze.path_correctness(mazes, predicted_paths),
+            path_optimal=maze.path_optimality(paths, predicted_paths),
+        )
+        logger(f"wrote {filename}")
 ######################################################################
@@ -648,59 +580,51 @@ class Snake(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis
     ):
-        with torch.autograd.no_grad():
-            t = model.training
-            model.eval()
-
-            def compute_nb_correct(input, prior_visits):
-                result = input.clone()
-                i = torch.arange(result.size(1), device=result.device)[None, :]
-                ar_mask = (
-                    torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
-                    .long()
-                    .expand_as(result)
-                )
-                result *= 1 - ar_mask
+        def compute_nb_correct(input, prior_visits):
+            result = input.clone()
+            i = torch.arange(result.size(1), device=result.device)[None, :]
+            ar_mask = (
+                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
+                .long()
+                .expand_as(result)
+            )
+            result *= 1 - ar_mask
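+            # the even-indexed tokens after the prompt are erased and
+            # re-predicted, then compared to the ground truth below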
-                # snake.solver(result,ar_mask)
+            # snake.solver(result,ar_mask)
-                masked_inplace_autoregression(
-                    model,
-                    self.batch_size,
-                    result,
-                    ar_mask,
-                    deterministic_synthesis,
-                    device=self.device,
-                )
-
-                nb_total = ((prior_visits > 0) * ar_mask).sum()
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+            )
-                nb_correct = (
-                    (result == input).long() * (prior_visits > 0) * ar_mask
-                ).sum()
+            nb_total = ((prior_visits > 0) * ar_mask).sum()
-                # nb_total = result.size(0)
-                # nb_correct = ((result - input).abs().sum(1) == 0).sum()
+            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
-                return nb_total, nb_correct
+            # nb_total = result.size(0)
+            # nb_correct = ((result - input).abs().sum(1) == 0).sum()
-            # train_nb_total, train_nb_correct = compute_nb_correct(
-            #     self.train_input, self.train_prior_visits
-            # )
+            return nb_total, nb_correct
-            # logger(
-            #     f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
-            # )
+        # train_nb_total, train_nb_correct = compute_nb_correct(
+        #     self.train_input, self.train_prior_visits
+        # )
-            test_nb_total, test_nb_correct = compute_nb_correct(
-                self.test_input[:1000], self.test_prior_visits[:1000]
-            )
+        # logger(
+        #     f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+        # )
-            logger(
-                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-            )
+        test_nb_total, test_nb_correct = compute_nb_correct(
+            self.test_input[:1000], self.test_prior_visits[:1000]
+        )
-            model.train(t)
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
 ######################################################################
@@ -780,64 +704,61 @@ class Stack(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis
     ):
-        with torch.autograd.no_grad():
-            t = model.training
-            model.eval()
-
-            def compute_nb_correct(input):
-                result = input.clone()
-                stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-                ar_mask = (result != input).long()
-                masked_inplace_autoregression(
-                    model,
-                    self.batch_size,
-                    result,
-                    ar_mask,
-                    deterministic_synthesis,
-                    device=self.device,
-                )
+        def compute_nb_correct(input):
+            result = input.clone()
+            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+            ar_mask = (result != input).long()
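+            # remove_popped_values() blanks the values produced by the pop
+            # operations, so the mask covers exactly what must be predicted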
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+            )
-                errors = ((result != input).long() * ar_mask).reshape(
-                    -1, 1 + self.nb_digits
-                )
-                ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
+            errors = ((result != input).long() * ar_mask).reshape(
+                -1, 1 + self.nb_digits
+            )
+            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
-                nb_total = ar_mask.max(1).values.sum()
-                nb_correct = nb_total - errors.max(1).values.sum()
+            nb_total = ar_mask.max(1).values.sum()
+            nb_correct = nb_total - errors.max(1).values.sum()
-                return nb_total, nb_correct
+            return nb_total, nb_correct
-            test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
+        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
-            logger(
-                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-            )
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
-            ##############################################################
-            # Log a few generated sequences
-            input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
-            result = input.clone()
-            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
-            ar_mask = (result != input).long()
-            for n in range(result.size(0)):
-                logger(
-                    f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
-            for n in range(result.size(0)):
-                logger(
-                    f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
-                )
-            ##############################################################
+        ##############################################################
+        # Log a few generated sequences
+        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+        result = input.clone()
+        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+        ar_mask = (result != input).long()
-            model.train(t)
+        # for n in range(result.size(0)):
+        #     logger(
+        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+        #     )
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        for n in range(result.size(0)):
+            logger(
+                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+            )
+        ##############################################################
 ######################################################################
@@ -847,12 +768,28 @@ import expr
 class Expr(Task):
+    def tensorize(self, sequences):
+        len_max = max([len(x) for x in sequences])
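+        # pad every sequence to the same length with the filler token "#"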
+        return torch.cat(
+            [
+                torch.tensor(
+                    [
+                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
+                        for s in sequences
+                    ]
+                )
+            ],
+            0,
+        ).to(self.device)
+
     def __init__(
         self,
         nb_train_samples,
         nb_test_samples,
         nb_variables,
         sequence_length,
+        operand_max,
+        result_max,
         batch_size,
         device=torch.device("cpu"),
     ):
@@ -863,51 +800,28 @@ class Expr(Task):
             nb_train_samples,
             nb_variables=nb_variables,
             length=sequence_length,
-            # length=2 * sequence_length,
-            # randomize_length=True,
+            operand_max=operand_max,
+            result_max=result_max,
         )
+
         test_sequences = expr.generate_sequences(
             nb_test_samples,
             nb_variables=nb_variables,
             length=sequence_length,
+            operand_max=operand_max,
+            result_max=result_max,
         )
-        self.char2id = dict(
-            [
-                (c, n)
-                for n, c in enumerate(
-                    set("#" + "".join(train_sequences + test_sequences))
-                )
-            ]
-        )
+
+        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
+        symbols.sort()
+
+        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
         self.filler, self.space = self.char2id["#"], self.char2id[" "]
-        len_max = max([len(x) for x in train_sequences])
-        self.train_input = torch.cat(
-            [
-                torch.tensor(
-                    [
-                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
-                        for s in train_sequences
-                    ]
-                )
-            ],
-            0,
-        ).to(device)
-
-        len_max = max([len(x) for x in test_sequences])
-        self.test_input = torch.cat(
-            [
-                torch.tensor(
-                    [
-                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
-                        for s in test_sequences
-                    ]
-                )
-            ],
-            0,
-        ).to(device)
+        self.train_input = self.tensorize(train_sequences)
+        self.test_input = self.tensorize(test_sequences)
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
@@ -921,9 +835,8 @@ class Expr(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
-            if split == "train":
-                last = (batch != self.filler).max(0).values.nonzero().max() + 3
-                batch = batch[:, :last]
+            last = (batch != self.filler).max(0).values.nonzero().max() + 3
+            batch = batch[:, :last]
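+            # crop the batch to its rightmost non-filler column (plus a
+            # small margin) to avoid computing on padding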
             yield batch
     def vocabulary_size(self):
@@ -933,97 +846,109 @@ class Expr(Task):
return "".join([self.id2char[k.item()] for k in s])
     def produce_results(
-        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+        self,
+        n_epoch,
+        model,
+        result_dir,
+        logger,
+        deterministic_synthesis,
+        input_file=None,
     ):
-        with torch.autograd.no_grad():
-            t = model.training
-            model.eval()
-
-            def compute_nb_correct(input):
-                result = input.clone()
-                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
-                result = (1 - ar_mask) * result + ar_mask * self.filler
-                masked_inplace_autoregression(
-                    model,
-                    self.batch_size,
-                    result,
-                    ar_mask,
-                    deterministic_synthesis,
-                    device=self.device,
-                )
+        def compute_nb_correct(input):
+            result = input.clone()
+            s = (result == self.space).long()
+            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.filler
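+            # the mask is 1 strictly after the first space: the expression is
+            # kept, everything after it is blanked and regenerated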
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+            )
-                nb_total = input.size(0)
-                nb_correct = (input == result).long().min(1).values.sum()
+            nb_total = input.size(0)
+            nb_correct = (input == result).long().min(1).values.sum()
-                #######################################################################
-                # Compute predicted vs. true variable values
+            #######################################################################
+            # Compute predicted vs. true variable values
-                nb_delta = torch.zeros(5, dtype=torch.int64)
-                nb_missed = 0
+            nb_delta = torch.zeros(5, dtype=torch.int64)
+            nb_missed = 0
-                values_input = expr.extract_results([self.seq2str(s) for s in input])
-                values_result = expr.extract_results([self.seq2str(s) for s in result])
+            values_input = expr.extract_results([self.seq2str(s) for s in input])
+            values_result = expr.extract_results([self.seq2str(s) for s in result])
-                for i, r in zip(values_input, values_result):
-                    for n, vi in i.items():
-                        vr = r.get(n)
-                        if vr is None or vr < 0:
+            for i, r in zip(values_input, values_result):
+                for n, vi in i.items():
+                    vr = r.get(n)
+                    if vr is None or vr < 0:
+                        nb_missed += 1
+                    else:
+                        d = abs(vr - vi)
+                        if d >= nb_delta.size(0):
                             nb_missed += 1
                         else:
-                            d = abs(vr - vi)
-                            if d >= nb_delta.size(0):
-                                nb_missed += 1
-                            else:
-                                nb_delta[d] += 1
+                            nb_delta[d] += 1
-                ######################################################################
+            ######################################################################
-                return nb_total, nb_correct, nb_delta, nb_missed
+            return nb_total, nb_correct, nb_delta, nb_missed
-            (
-                test_nb_total,
-                test_nb_correct,
-                test_nb_delta,
-                test_nb_missed,
-            ) = compute_nb_correct(self.test_input[:1000])
+        (
+            test_nb_total,
+            test_nb_correct,
+            test_nb_delta,
+            test_nb_missed,
+        ) = compute_nb_correct(self.test_input[:10000])
-            logger(
-                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-            )
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
-            nb_total = test_nb_delta.sum() + test_nb_missed
-            for d in range(test_nb_delta.size(0)):
-                logger(
-                    f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
-                )
+        nb_total = test_nb_delta.sum() + test_nb_missed
+        for d in range(test_nb_delta.size(0)):
             logger(
-                f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
+                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
             )
+        logger(
+            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
+        )
-            ##############################################################
-            # Log a few generated sequences
+        ##############################################################
+        # Log a few generated sequences
+        if input_file is None:
             input = self.test_input[:10]
-            result = input.clone()
-            ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
-            result = (1 - ar_mask) * result + ar_mask * self.filler
-            for n in range(result.size(0)):
-                logger(f"test_before {self.seq2str(result[n])}")
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
-            correct = (1 - ar_mask) * self.space + ar_mask * input
-            for n in range(result.size(0)):
-                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
-                logger(f"test_after {self.seq2str(result[n])} {comment}")
-                logger(f"correct {self.seq2str(correct[n])}")
-            ##############################################################
-
-            model.train(t)
+        else:
+            with open(input_file, "r") as f:
+                sequences = [e.strip() for e in f.readlines()]
+                sequences = [s + " " + "#" * 50 for s in sequences]
+                input = self.tensorize(sequences)
+
+        result = input.clone()
+        s = (result == self.space).long()
+        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+        result = (1 - ar_mask) * result + ar_mask * self.filler
+
+        for n in range(result.size(0)):
+            logger(f"test_before {self.seq2str(result[n])}")
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        correct = (1 - ar_mask) * self.space + ar_mask * input
+        for n in range(result.size(0)):
+            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
+            logger(f"test_after {self.seq2str(result[n])} {comment}")
+            logger(f"truth {self.seq2str(correct[n])}")
+        ##############################################################
 ######################################################################