import os, tqdm

import torch, torchvision

from torch.nn import functional as F

# Task-specific data generators used by the classes below
import picoclvr, maze, snake, stack, expr

######################################################################
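
# This module defines a minimal Task interface (batches, vocabulary_size,
# produce_results) and several concrete tasks used to train and evaluate an
# autoregressive model: PicoCLVR scene descriptions, MNIST pixel sequences,
# maze solving, snake trajectories, stack manipulation, and symbolic
# expressions.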

def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=input.size(0) // batch_size,
        )

    for input, ar_mask in batches:
        model.masked_inplace_autoregression(
            input, ar_mask, forbidden_tokens, deterministic_synthesis
        )
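
# The helper above splits input and ar_mask into mini-batches and delegates to
# the model's own masked_inplace_autoregression, which regenerates in place
# the positions where ar_mask is 1, keeps the others as a fixed prompt, and
# (presumably) never samples the tokens flagged in forbidden_tokens. A typical
# call, with illustrative values only, looks like:
#
#   ar_mask = input.new_zeros(input.size())
#   ar_mask[:, prompt_length:] = 1  # regenerate everything after the prompt
#   masked_inplace_autoregression(model, 25, input, ar_mask, True)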


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################


class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]

    # Trim the tensor(s) in z, removing as many columns equal to `token` as
    # possible on the left and on the right of the first tensor. If z is a
    # tuple, all its elements are trimmed according to the trimming computed
    # for the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
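
    # The (1, 1) padding shifts column indices by one, which exactly
    # compensates for a and b being computed on the padded tensor but applied
    # to the unpadded one. Illustrative single-row example: [<nul> a b <nul>]
    # is padded to [<nul> <nul> a b <nul> <nul>], the cumulative count of
    # non-<nul> columns is [0 0 1 2 2 2], hence a = 1, b = 3, and the slice
    # [:, 1:3] keeps exactly [a b].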

    ######################
    # Not the cleanest part of the code

    # Extract the last image of each sequence, from the last <img>
    # included, and set to <nul> all the tokens from the beginning of
    # that image to the end
    def excise_last_image(self, input):
        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1

        input = input.clone()
        t = (input == t_img).long()
        tail_masks = (t.cumsum(dim=1) == t.sum(dim=1, keepdim=True)).long()
        i = (t * tail_masks).nonzero(as_tuple=True)
        j = (
            i[0][:, None],
            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
        )
        images = self.trim(input[j])
        input[j] = t_nul
        loss_masks = 1 - tail_masks
        input, loss_masks = self.trim((input, loss_masks))
        return input, loss_masks, images
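
    # excise_last_image above cuts out the last image of every sequence; the
    # two methods below append an image back in its place, either the
    # ground-truth one (add_true_image) or one generated by the model
    # (add_generated_image), so that the two can be compared downstream.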
    def add_true_image(self, input, images, loss_masks):
        t_nul = self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1
        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
        t = (input == t_nul).long()
        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
        j = (
            i[0][:, None],
            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
        )
        input[j] = images
        loss_masks[j] = 1
        input, loss_masks = self.trim((input, loss_masks))
        return input, loss_masks

    def add_generated_image(self, input, loss_masks, model, deterministic_synthesis):
        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1

        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
        t = (input == t_nul).long()
        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
        input[i] = t_img

        j = (
            i[0][:, None],
            i[1][:, None]
            + 1
            + torch.arange(nb_img_tokens - 1, device=input.device)[None, :],
        )
        ar_masks = input.new_zeros(input.size(), dtype=torch.int64)
        ar_masks[j] = 1
        forbidden_tokens = (
            torch.arange(self.vocabulary_size(), device=input.device) == t_nul
        )
        with torch.autograd.no_grad():
            t = model.training
            model.eval()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                input,
                ar_masks,
                deterministic_synthesis,
                forbidden_tokens,
                progress_bar_desc=None,
                device=self.device,
            )
            model.train(t)

        input, loss_masks = self.trim((input, loss_masks))

        return input, loss_masks
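
    # In add_generated_image above, only the freshly appended image slots carry
    # ar_masks == 1, so the rest of the sequence acts as the prompt, and <nul>
    # is forbidden during generation so that the model has to fill every pixel
    # position of the new image.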

    ######################

    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        nb_colors=5, logger=None, device=torch.device("cpu"),
        pruner_train=None, pruner_eval=None,
    ):
        def generate_descr(nb, cache_suffix, pruner):
            # exact keyword names of picoclvr.generate assumed
            return picoclvr.generate(
                nb, height=self.height, width=self.width,
                nb_colors=nb_colors, pruner=pruner,
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        # Parameters of the data generation (variable name assumed; its later
        # use, e.g. for caching with cache_suffix, is not shown here)
        param = {
            "nb_train_samples": nb_train_samples,
            "nb_test_samples": nb_test_samples,
            "height": height,
            "width": width,
            "nb_colors": nb_colors,
            "batch_size": batch_size,
            "rng_state": list(torch.get_rng_state()),
        }

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # Make this set a sorted list to get the same tensors given the same
        # descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc=f"test-properties",
        ):
            tape, loss_masks, _ = self.excise_last_image(input)
            tape, loss_masks = self.add_generated_image(
                tape, loss_masks, model, deterministic_synthesis
            )
            result_descr = self.detensorize(tape)
            np = picoclvr.nb_properties(
                result_descr, height=self.height, width=self.width, pruner=pruner
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )
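
    # In the evaluation above, the model regenerates the final image of each
    # test sequence from its textual description; picoclvr.nb_properties then
    # counts, per sample, how many requested properties the generated image
    # misses, and property_miss reports the percentage of missing properties
    # over all requested ones.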

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, pruner=self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3

        # number of samples per primer assumed
        nb_per_primer = 8
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr] * nb_per_primer

        tape = self.tensorize(primer)
        loss_masks = 1 - (tape == self.token2id["<nul>"]).long()
        tape, loss_masks = self.add_generated_image(
            tape, loss_masks, model, deterministic_synthesis
        )
        result_descr = self.detensorize(tape)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        # metric prefix assumed
        prefix = "demo_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        # branching conditions below assumed, reconstructed from the visible lines
        if img.dim() == 5:
            if img.size(1) == 1:
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
                        for x in img
                    ],
                    0,
                )

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################


class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device
        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
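
    # Each image is flattened into a sequence of 28*28 raw pixel intensities,
    # so every token is a byte value and the vocabulary has 256 entries.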
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return 256

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
        )
        logger(f"wrote {image_name}")


######################################################################


class Maze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
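
    # A sample is the concatenation of a flattened maze map and a flattened
    # path over the same grid: map2seq flattens each grid and concatenates
    # along the sequence dimension, and seq2map reshapes a sequence back into
    # its height x width grids.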
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size,
        height, width, nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        # keyword names of maze.create_maze_data other than progress_bar assumed
        train_mazes, train_paths, _ = maze.create_maze_data(
            nb_train_samples, height=height, width=width, nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            nb_test_samples, height=height, width=width, nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        if count.max() == 0:
            count = None
        else:
            count = count[
                : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
            ]

        return nb_total, nb_correct, count
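
    # In the count matrix built above, count[i, j] is the number of correctly
    # solved mazes whose optimal path occupies i cells and whose predicted
    # path occupies j cells; the diagonal therefore corresponds to predictions
    # that are also optimal.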
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            # nb_to_use value assumed
            train_nb_total, train_nb_correct, count = self.compute_error(
                model, "train", nb_to_use=1000,
                deterministic_synthesis=deterministic_synthesis,
            )
            logger(
                f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct, count = self.compute_error(
                model, "test", nb_to_use=1000,
                deterministic_synthesis=deterministic_synthesis,
            )
            logger(
                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            if count is not None:
                proportion_optimal = count.diagonal().sum().float() / count.sum()
                logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
                with open(
                    os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
                ) as f:
                    for i in range(count.size(0)):
                        for j in range(count.size(1)):
                            eol = " " if j < count.size(1) - 1 else "\n"
                            f.write(f"{count[i,j]}{eol}")

            input = self.test_input[:48]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)

            filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
            # leading arguments of maze.save_image assumed
            maze.save_image(
                filename,
                mazes=mazes,
                target_paths=paths,
                predicted_paths=predicted_paths,
                path_correct=maze.path_correctness(mazes, predicted_paths),
                path_optimal=maze.path_optimality(paths, predicted_paths),
            )
            logger(f"wrote {filename}")

            model.train(t)


######################################################################


class Snake(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size,
        height, width, nb_colors, length, prompt_length,
        device=torch.device("cpu"),
    ):
        # the snake-specific constructor parameters above and the arguments
        # passed to snake.generate_sequences below are assumed
        self.batch_size = batch_size
        self.device = device
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
            nb_train_samples, height, width, nb_colors, length, prompt_length, device
        )
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
            nb_test_samples, height, width, nb_colors, length, prompt_length, device
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
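
    # The evaluation below regenerates every other position after the prompt
    # and only scores those whose grid cell was already visited
    # (prior_visits > 0), presumably because those are the positions whose
    # value is fully determined by the beginning of the sequence.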
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            def compute_nb_correct(input, prior_visits):
                result = input.clone()
                i = torch.arange(result.size(1), device=result.device)[None, :]
                ar_mask = (
                    torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                    .long()
                    .expand_as(result)
                )
                result *= 1 - ar_mask

                # snake.solver(result,ar_mask)

                masked_inplace_autoregression(
                    model, self.batch_size, result, ar_mask,
                    deterministic_synthesis, device=self.device,
                )

                nb_total = ((prior_visits > 0) * ar_mask).sum()
                nb_correct = (
                    (result == input).long() * (prior_visits > 0) * ar_mask
                ).sum()

                # nb_total = result.size(0)
                # nb_correct = ((result - input).abs().sum(1) == 0).sum()

                return nb_total, nb_correct

            # train_nb_total, train_nb_correct = compute_nb_correct(
            #     self.train_input, self.train_prior_visits
            # )

            # logger(
            #     f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            # )

            test_nb_total, test_nb_correct = compute_nb_correct(
                self.test_input[:1000], self.test_prior_visits[:1000]
            )

            logger(
                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            model.train(t)


######################################################################


class Stack(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, logger,
        nb_steps, nb_stacks, nb_digits,
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits
        self.device = device

        # Optionally make train and test use disjoint sets of pushed values
        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]

        # argument list of stack.generate_sequences assumed
        self.train_input, self.train_stack_counts = stack.generate_sequences(
            nb_train_samples, nb_steps, nb_stacks, nb_digits,
            values_for_train, self.device,
        )
        self.test_input, self.test_stack_counts = stack.generate_sequences(
            nb_test_samples, nb_steps, nb_stacks, nb_digits,
            values_for_test, self.device,
        )

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
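
    # Evaluation below: stack.remove_popped_values blanks the digits produced
    # by pop operations, the model regenerates them (ar_mask marks exactly the
    # blanked positions), and a pop is counted as correct only if all of its
    # 1 + nb_digits tokens are reproduced exactly.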
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            def compute_nb_correct(input):
                result = input.clone()
                stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
                ar_mask = (result != input).long()
                masked_inplace_autoregression(
                    model, self.batch_size, result, ar_mask,
                    deterministic_synthesis, device=self.device,
                )

                errors = ((result != input).long() * ar_mask).reshape(
                    -1, 1 + self.nb_digits
                )
                ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

                nb_total = ar_mask.max(1).values.sum()
                nb_correct = nb_total - errors.max(1).values.sum()

                return nb_total, nb_correct

            test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

            logger(
                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            ##############################################################
            # Log a few generated sequences
            input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            for n in range(result.size(0)):
                logger(
                    f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
                )
            masked_inplace_autoregression(
                model, self.batch_size, result, ar_mask,
                deterministic_synthesis, device=self.device,
            )
            for n in range(result.size(0)):
                logger(
                    f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
                )
            ##############################################################

            model.train(t)


######################################################################


class Expr(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, nb_variables,
        sequence_length, batch_size,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            # length=2 * sequence_length,
            # randomize_length=True,
        )
        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
        )

        # Character-level tokenizer over all characters occurring in the
        # sequences, plus the filler character "#"
        self.char2id = dict(
            [
                (c, n)
                for n, c in enumerate(
                    set("#" + "".join(train_sequences + test_sequences))
                )
            ]
        )
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        len_max = max([len(x) for x in train_sequences])
        self.train_input = torch.cat(
            [
                torch.tensor(
                    [
                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                        for s in train_sequences
                    ]
                )
            ],
            0,
        ).to(device)

        len_max = max([len(x) for x in test_sequences])
        self.test_input = torch.cat(
            [
                torch.tensor(
                    [
                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                        for s in test_sequences
                    ]
                )
            ],
            0,
        ).to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            # Drop the filler-only tail columns, keeping a small margin
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])
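
    # Sequences are character strings: "#" is the filler and everything from
    # the first space onward is the part to predict. The evaluation below
    # masks that suffix, lets the model regenerate it, and counts a sequence
    # as correct only if every character matches the ground truth.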
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            def compute_nb_correct(input):
                result = input.clone()
                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
                result = (1 - ar_mask) * result + ar_mask * self.filler
                masked_inplace_autoregression(
                    model, self.batch_size, result, ar_mask,
                    deterministic_synthesis, device=self.device,
                )

                nb_total = input.size(0)
                nb_correct = (input == result).long().min(1).values.sum()

                #######################################################################
                # Compute predicted vs. true variable values

                nb_delta = torch.zeros(5, dtype=torch.int64)
                nb_missed = 0

                values_input = expr.extract_results([self.seq2str(s) for s in input])
                values_result = expr.extract_results([self.seq2str(s) for s in result])

                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

                ######################################################################

                return nb_total, nb_correct, nb_delta, nb_missed

            (
                test_nb_total,
                test_nb_correct,
                test_nb_delta,
                test_nb_missed,
            ) = compute_nb_correct(self.test_input[:1000])

            logger(
                f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            nb_total = test_nb_delta.sum() + test_nb_missed
            for d in range(test_nb_delta.size(0)):
                logger(
                    f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
                )
            logger(
                f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
            )

            ##############################################################
            # Log a few generated sequences
            input = self.test_input[:10]
            result = input.clone()
            ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            for n in range(result.size(0)):
                logger(f"test_before {self.seq2str(result[n])}")
            masked_inplace_autoregression(
                model, self.batch_size, result, ar_mask,
                deterministic_synthesis, device=self.device,
            )
            correct = (1 - ar_mask) * self.space + ar_mask * input
            for n in range(result.size(0)):
                comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
                logger(f"test_after {self.seq2str(result[n])} {comment}")
                logger(f"correct {self.seq2str(correct[n])}")
            ##############################################################

            model.train(t)


######################################################################