3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
8 import math, os, tqdm, warnings
10 import torch, torchvision
13 from torch.nn import functional as F
15 from mygpt import BracketedSequence
17 # from graph import save_attention_image
18 save_attention_image = None
20 ######################################################################
23 def masked_inplace_autoregression(
28 deterministic_synthesis,
29 forbidden_tokens=None,
31 progress_bar_desc="autoregression",
32 device=torch.device("cpu"),
34 assert input.size() == ar_mask.size()
36 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
38 if progress_bar_desc is not None:
42 desc=progress_bar_desc,
43 total=(input.size(0) + batch_size - 1) // batch_size,
46 with torch.autograd.no_grad():
50 for input, ar_mask in batches:
51 model.masked_inplace_autoregression(
54 deterministic_synthesis,
62 ######################################################################
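# A minimal usage sketch of masked_inplace_autoregression above (illustrative
# only, not part of the original file; the positional arguments model,
# batch_size, input, ar_mask are inferred from the call sites and should be
# checked against the full signature). Positions where ar_mask == 1 are
# regenerated in place by the model, the others are kept as the prompt.
#
#   ar_mask = torch.zeros_like(input)
#   ar_mask[:, prompt_len:] = 1      # hypothetical prompt length
#   input *= 1 - ar_mask             # erase the part to be generated
#   masked_inplace_autoregression(
#       model,
#       batch_size,
#       input,
#       ar_mask,
#       deterministic_synthesis=False,
#       device=input.device,
#   )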
66 def batches(self, split="train", nb_to_use=-1, desc=None):
69 def vocabulary_size(self):
73 self, n_epoch, model, result_dir, logger, deterministic_synthesis
78 class TaskFromFile(Task):
79 def tensorize(self, pairs, shuffle):
80 len_max = max([len(x[0]) for x in pairs])
86 [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
94 pred_mask = torch.cat(
98 [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
107 i = torch.randperm(input.size(0))
108 input = input[i].contiguous()
109 pred_mask = pred_mask[i].contiguous()
111 return input, pred_mask
# trim all the tensors in the tuple z to remove as many tokens as
# possible from the left and right ends of the first tensor. If z is a
# tuple, all its elements are trimmed according to the trimming of the
# first one.
116 def trim(self, z, token="#"):
117 n = self.char2id[token]
120 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
121 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
122 return tuple([t[:, a:b] for t in z])
124 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
125 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
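# A small illustration of trim on hypothetical values (here "#" has id 0, as
# in the tokenizer built below): columns made only of padding at the left and
# right ends are removed, interior padding is kept.
#
#   x = torch.tensor([[0, 3, 5, 0, 0],
#                     [0, 0, 7, 2, 0]])
#   task.trim(x)  # -> tensor([[3, 5, 0],
#                 #            [0, 7, 2]])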
136 device=torch.device("cpu"),
138 self.batch_size = batch_size
141 def read_file(filename, nb=-1):
143 with open(filename, "r") as f:
145 sequence = f.readline().strip()
148 pred_mask = f.readline().strip()
149 assert len(sequence) == len(pred_mask)
150 assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
151 pairs.append((sequence, pred_mask))
157 assert len(pairs) == nb
161 train_pairs = read_file(train_filename, nb_train_samples)
162 test_pairs = read_file(test_filename, nb_test_samples)
164 symbols = ["#"] + list(
165 set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
167 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
168 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
170 self.train_input, self.train_pred_masks = self.tensorize(
171 train_pairs, shuffle=shuffle
173 self.test_input, self.test_pred_masks = self.tensorize(
174 test_pairs, shuffle=shuffle
177 def batches(self, split="train", nb_to_use=-1, desc=None):
178 assert split in {"train", "test"}
179 input = self.train_input if split == "train" else self.test_input
181 input = input[:nb_to_use]
183 desc = f"epoch-{split}"
184 for batch in tqdm.tqdm(
185 input.split(self.batch_size), dynamic_ncols=True, desc=desc
187 yield self.trim(batch).to(self.device)
189 def vocabulary_size(self):
190 return len(self.char2id)
192 def tensor2str(self, t):
193 return ["".join([self.id2char[x.item()] for x in s]) for s in t]
196 self, n_epoch, model, result_dir, logger, deterministic_synthesis
198 correct = self.trim(self.test_input[:1000]).to(self.device)
199 result = correct.clone()
200 pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
201 ar_mask = (pred_mask > 0).long()
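# pred_mask convention, as implied by read_file above and err_mask below:
# 0 = given context, 1 = to be generated, 2 = to be generated and counted
# in the accuracy computation.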
202 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
204 logger(f"----------------------------------------------------------")
206 for e in self.tensor2str(result[:50]):
207 logger(f"test_before {e}")
209 masked_inplace_autoregression(
214 deterministic_synthesis,
218 logger(f"----------------------------------------------------------")
220 for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
221 logger(f"test_after {e}")
222 logger(f"correct {c}")
224 logger(f"----------------------------------------------------------")
226 err_mask = (pred_mask == 2).long()
227 nb_total = err_mask.sum().item()
228 nb_correct = ((correct == result).long() * err_mask).sum().item()
230 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
231 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
247 device=torch.device("cpu"),
252 self.batch_size = batch_size
254 self.problem = problem
256 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
259 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
263 self.train_input, self.train_ar_mask = self.train_input.to(
265 ), self.train_ar_mask.to(device)
266 self.test_input, self.test_ar_mask = self.test_input.to(
268 ), self.test_ar_mask.to(device)
270 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
272 # A bit of paranoia never hurts
273 assert self.nb_codes <= max_nb_codes
274 assert self.train_input.min() >= 0
275 assert self.test_input.min() >= 0
276 assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
281 assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
287 if logger is not None:
288 for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
289 logger(f"train_sequences {self.problem.seq2str(s)}")
290 a = "".join(["01"[x.item()] for x in a])
293 def batches(self, split="train", nb_to_use=-1, desc=None):
294 assert split in {"train", "test"}
295 input = self.train_input if split == "train" else self.test_input
297 input = input[:nb_to_use]
299 desc = f"epoch-{split}"
300 for batch in tqdm.tqdm(
301 input.split(self.batch_size), dynamic_ncols=True, desc=desc
305 def vocabulary_size(self):
309 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
311 def compute_accuracy(input, ar_mask, logger=None):
312 input, ar_mask = input[:nmax], ar_mask[:nmax]
313 result = input.clone() * (1 - ar_mask)
315 masked_inplace_autoregression(
320 deterministic_synthesis,
321 progress_bar_desc=None,
325 log_ground_truth = ar_mask.min() == 0
327 if logger is not None:
328 for sp, st in zip(result[:10], input[:10]):
330 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
334 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
337 nb_total, nb_correct = self.problem.compute_nb_correct(
338 input, ar_mask, result
341 # nb_total = ar_mask.sum().item()
342 # nb_correct = ((result == input).long() * ar_mask).sum().item()
344 return nb_total, nb_correct
346 train_nb_total, train_nb_correct = compute_accuracy(
347 self.train_input, self.train_ar_mask
351 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
354 test_nb_total, test_nb_correct = compute_accuracy(
355 self.test_input, self.test_ar_mask, logger
359 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
362 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
364 if save_attention_image is not None:
366 ns = torch.randint(self.test_input.size(0), (1,)).item()
367 input = self.test_input[ns : ns + 1].clone()
369 with torch.autograd.no_grad():
372 # model.record_attention(True)
373 model(BracketedSequence(input))
375 # ram = model.retrieve_attention()
376 # model.record_attention(False)
378 # tokens_output = [c for c in self.problem.seq2str(input[0])]
379 # tokens_input = ["n/a"] + tokens_output[:-1]
380 # for n_head in range(ram[0].size(1)):
381 # filename = os.path.join(
382 # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
384 # attention_matrices = [m[0, n_head] for m in ram]
385 # save_attention_image(
389 # attention_matrices,
391 ##min_total_attention=0.9,
395 # logger(f"wrote {filename}")
398 ######################################################################
403 class PicoCLVR(Task):
404 # Make a tensor from a list of strings
405 def tensorize(self, descr):
406 token_descr = [s.strip().split(" ") for s in descr]
407 l = max([len(s) for s in token_descr])
408 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
409 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
410 return torch.tensor(id_descr, device=self.device)
412 # Make a list of strings from a tensor
413 def detensorize(self, x):
414 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
# trim all the tensors in the tuple z to remove as many tokens as
# possible from the left and right ends of the first tensor. If z is a
# tuple, all its elements are trimmed according to the trimming of the
# first one.
419 def trim(self, z, token="<nul>"):
420 n = self.token2id[token]
423 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
424 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
425 return tuple([t[:, a:b] for t in z])
427 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
428 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
431 ######################
442 device=torch.device("cpu"),
448 def generate_descr(nb, cache_suffix, pruner):
449 return picoclvr.generate(
459 self.batch_size = batch_size
461 self.pruner_train = pruner_train
462 self.pruner_eval = pruner_eval
464 if logger is not None:
466 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
469 self.train_descr = generate_descr(
470 nb_train_samples, "train", pruner=self.pruner_train
472 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
474 # Build the tokenizer
475 tokens = {"<nul>", "<img>"}
476 for d in [self.train_descr, self.test_descr]:
478 for t in s.strip().split(" "):
# make this set a sorted list to get the same tensors given the same descriptions
482 tokens = list(tokens)
484 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
485 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
486 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
488 # Tokenize the train and test sets
489 self.train_input = self.tensorize(self.train_descr)
490 self.test_input = self.tensorize(self.test_descr)
492 def batches(self, split="train", nb_to_use=-1, desc=None):
493 assert split in {"train", "test"}
494 input = self.train_input if split == "train" else self.test_input
495 for batch in tqdm.tqdm(
496 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
498 yield self.trim(batch)
500 def vocabulary_size(self):
501 return len(self.token2id)
503 def compute_missing_properties(
504 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
506 acc_nb_requested_properties = []
507 acc_nb_missing_properties = []
510 for input in tqdm.tqdm(
511 self.test_input.split(self.batch_size),
513 desc=f"test-properties",
515 result = input.clone()
516 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
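# The mask is 1 from the first <img> token onward (inclusive): the image part
# of each description is blanked and regenerated, while the textual properties
# that precede it are kept as the prompt.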
517 result = (1 - ar_mask) * result + ar_mask * self.t_nul
518 masked_inplace_autoregression(
523 deterministic_synthesis,
524 progress_bar_desc=None,
528 result_descr = self.detensorize(result)
529 np = picoclvr.nb_properties(
535 nb_requested_properties, _, nb_missing_properties = zip(*np)
536 acc_nb_requested_properties += nb_requested_properties
537 acc_nb_missing_properties += nb_missing_properties
538 acc_nb_results += len(result_descr)
540 nb_requested_properties = sum(acc_nb_requested_properties)
541 nb_missing_properties = sum(acc_nb_missing_properties)
543 prefix = "" if pruner is None else "pruned_"
544 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
546 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
549 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
553 f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
556 ######################################################################
559 self, n_epoch, model, result_dir, logger, deterministic_synthesis
561 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
563 if self.pruner_eval is not None:
564 self.compute_missing_properties(n_epoch, model, self.pruner_eval)
566 nb_tokens_to_generate = self.height * self.width + 3
571 for primer_descr in [
572 "red above green <sep> green top <sep> blue right of red",
573 "there is red <sep> there is yellow <sep> there is blue",
574 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
575 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
577 primer += [primer_descr + " <img>"] * nb_per_primer
579 result = self.tensorize(primer)
580 fill = result.new_full(
581 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
583 result = torch.cat((result, fill), 1)
584 ar_mask = (result == self.t_nul).long()
585 masked_inplace_autoregression(
590 deterministic_synthesis,
593 result_descr = self.detensorize(result)
595 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
597 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
598 acc_nb_results = len(result_descr)
600 nb_requested_properties = sum(acc_nb_requested_properties)
601 nb_missing_properties = sum(acc_nb_missing_properties)
604 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
606 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
609 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
612 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
616 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
620 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
626 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
627 torchvision.utils.save_image(
628 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
630 logger(f"wrote {image_name}")
633 ######################################################################
638 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
self.nb_train_samples = nb_train_samples
self.nb_test_samples = nb_test_samples
644 self.batch_size = batch_size
646 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
647 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
648 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
649 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
651 def batches(self, split="train", nb_to_use=-1, desc=None):
652 assert split in {"train", "test"}
653 input = self.train_input if split == "train" else self.test_input
655 input = input[:nb_to_use]
657 desc = f"epoch-{split}"
658 for batch in tqdm.tqdm(
659 input.split(self.batch_size), dynamic_ncols=True, desc=desc
663 def vocabulary_size(self):
667 self, n_epoch, model, result_dir, logger, deterministic_synthesis
669 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
670 ar_mask = torch.full_like(results, 1)
671 masked_inplace_autoregression(
676 deterministic_synthesis,
679 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
680 torchvision.utils.save_image(
681 1 - results.reshape(-1, 1, 28, 28) / 255.0,
686 logger(f"wrote {image_name}")
689 ######################################################################
695 def map2seq(self, *m):
696 return torch.cat([x.flatten(1) for x in m], 1)
698 def seq2map(self, s):
699 s = s.reshape(s.size(0), -1, self.height, self.width)
700 return (s[:, k] for k in range(s.size(1)))
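# Each sequence is the concatenation of flattened height x width maps (the
# maze followed by the path), hence the splits at self.height * self.width in
# the evaluation code below.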
710 device=torch.device("cpu"),
714 self.batch_size = batch_size
719 train_mazes, train_paths, _ = maze.create_maze_data(
724 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
726 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
728 test_mazes, test_paths, _ = maze.create_maze_data(
733 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
735 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
737 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
739 def batches(self, split="train", nb_to_use=-1, desc=None):
740 assert split in {"train", "test"}
741 input = self.train_input if split == "train" else self.test_input
743 input = input[:nb_to_use]
745 desc = f"epoch-{split}"
746 for batch in tqdm.tqdm(
747 input.split(self.batch_size), dynamic_ncols=True, desc=desc
751 def vocabulary_size(self):
755 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
757 model_device = next(model.parameters()).device
758 nb_total, nb_correct = 0, 0
760 self.width * self.height,
761 self.width * self.height,
766 for input in self.batches(split, nb_to_use):
767 input = input.to(model_device)
768 result = input.clone()
769 ar_mask = result.new_zeros(result.size())
770 ar_mask[:, self.height * self.width :] = 1
771 result *= 1 - ar_mask
772 masked_inplace_autoregression(
777 deterministic_synthesis,
778 progress_bar_desc=None,
781 mazes, paths = self.seq2map(result)
782 path_correctness = maze.path_correctness(mazes, paths)
783 nb_correct += path_correctness.long().sum()
784 nb_total += mazes.size(0)
786 optimal_path_lengths = (
787 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
789 predicted_path_lengths = (
790 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
792 optimal_path_lengths = optimal_path_lengths[path_correctness]
793 predicted_path_lengths = predicted_path_lengths[path_correctness]
794 count[optimal_path_lengths, predicted_path_lengths] += 1
800 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
803 return nb_total, nb_correct, count
806 self, n_epoch, model, result_dir, logger, deterministic_synthesis
808 train_nb_total, train_nb_correct, count = self.compute_error(
812 deterministic_synthesis=deterministic_synthesis,
815 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
818 test_nb_total, test_nb_correct, count = self.compute_error(
822 deterministic_synthesis=deterministic_synthesis,
825 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
828 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
830 if count is not None:
831 proportion_optimal = count.diagonal().sum().float() / count.sum()
832 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
834 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
836 for i in range(count.size(0)):
837 for j in range(count.size(1)):
838 eol = " " if j < count.size(1) - 1 else "\n"
839 f.write(f"{count[i,j]}{eol}")
841 input = self.test_input[:48].to(next(model.parameters()).device)
842 result = input.clone()
843 ar_mask = result.new_zeros(result.size())
844 ar_mask[:, self.height * self.width :] = 1
845 result *= 1 - ar_mask
846 masked_inplace_autoregression(
851 deterministic_synthesis,
855 mazes, paths = self.seq2map(input)
856 _, predicted_paths = self.seq2map(result)
858 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
863 predicted_paths=predicted_paths,
864 path_correct=maze.path_correctness(mazes, predicted_paths),
865 path_optimal=maze.path_optimality(paths, predicted_paths),
867 logger(f"wrote {filename}")
870 ######################################################################
887 device=torch.device("cpu"),
891 self.batch_size = batch_size
895 self.prompt_length = prompt_length
897 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
906 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
916 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
918 def batches(self, split="train", nb_to_use=-1, desc=None):
919 assert split in {"train", "test"}
920 input = self.train_input if split == "train" else self.test_input
922 input = input[:nb_to_use]
924 desc = f"epoch-{split}"
925 for batch in tqdm.tqdm(
926 input.split(self.batch_size), dynamic_ncols=True, desc=desc
930 def vocabulary_size(self):
934 self, n_epoch, model, result_dir, logger, deterministic_synthesis
936 def compute_nb_correct(input, prior_visits):
937 result = input.clone()
938 i = torch.arange(result.size(1), device=result.device)[None, :]
940 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
944 result *= 1 - ar_mask
946 masked_inplace_autoregression(
951 deterministic_synthesis,
955 nb_total = ((prior_visits > 0) * ar_mask).sum()
957 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
959 return nb_total, nb_correct
961 test_nb_total, test_nb_correct = compute_nb_correct(
962 self.test_input[:1000], self.test_prior_visits[:1000]
966 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
969 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
972 ######################################################################
988 fraction_values_for_train=None,
989 device=torch.device("cpu"),
993 self.batch_size = batch_size
994 self.nb_steps = nb_steps
995 self.nb_stacks = nb_stacks
996 self.nb_digits = nb_digits
999 if fraction_values_for_train is None:
1000 values_for_train = None
1001 values_for_test = None
1003 all = torch.randperm(10**nb_digits)
1004 nb_for_train = int(all.size(0) * fraction_values_for_train)
1005 values_for_train = all[:nb_for_train]
1006 values_for_test = all[nb_for_train:]
1008 self.train_input, self.train_stack_counts = stack.generate_sequences(
1017 self.test_input, self.test_stack_counts = stack.generate_sequences(
1026 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
1027 counts = self.test_stack_counts.flatten()[i.flatten()]
1028 counts = F.one_hot(counts).sum(0)
1029 logger(f"test_pop_stack_counts {counts}")
1031 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1033 def batches(self, split="train", nb_to_use=-1, desc=None):
1034 assert split in {"train", "test"}
1035 input = self.train_input if split == "train" else self.test_input
1037 input = input[:nb_to_use]
1039 desc = f"epoch-{split}"
1040 for batch in tqdm.tqdm(
1041 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1045 def vocabulary_size(self):
1046 return self.nb_codes
1048 def produce_results(
1049 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1051 def compute_nb_correct(input):
1052 result = input.clone()
1053 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1054 ar_mask = (result != input).long()
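# remove_popped_values erases the digits produced by the pop operations, so
# result differs from input exactly at those positions and the mask marks
# them for regeneration.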
1055 masked_inplace_autoregression(
1060 deterministic_synthesis,
1064 errors = ((result != input).long() * ar_mask).reshape(
1065 -1, 1 + self.nb_digits
1067 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
1069 nb_total = ar_mask.max(1).values.sum()
1070 nb_correct = nb_total - errors.max(1).values.sum()
1072 return nb_total, nb_correct
1074 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
1077 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1080 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1082 ##############################################################
1083 # Log a few generated sequences
1084 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1085 result = input.clone()
1086 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1087 ar_mask = (result != input).long()
1089 # for n in range(result.size(0)):
1091 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1094 masked_inplace_autoregression(
1099 deterministic_synthesis,
1103 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1104 for label, input in [
1105 ("train", self.train_input[:32]),
1106 ("test", self.test_input[:32]),
1108 output = model(BracketedSequence(input)).x
1109 output = output.log_softmax(dim=-1)
1110 filename = os.path.join(
1111 result_dir, f"stack_with_crossentropy_{n_epoch:04d}_{label}.txt"
1113 with open(filename, "w") as f:
1114 for n in range(input.size(0)):
1115 s = stack.seq_to_str(
1116 input[n], nb_stacks=self.nb_stacks, nb_digits=self.nb_digits
1118 for t, k, w in zip(range(input[n].size(0)), input[n], s.split(" ")):
1123 + str(output[n][t][k].exp().item())
1128 logger(f"wrote {filename}")
1129 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1131 for n in range(result.size(0)):
1133 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1135 ##############################################################
1138 ######################################################################
1144 def tensorize(self, sequences):
1145 len_max = max([len(x) for x in sequences])
1151 self.token2id[str(c)]
1152 for c in s + ["<nul>"] * (len_max - len(s))
1161 def seq2str(self, seq):
1162 return " ".join([self.id2token[i] for i in seq])
1169 nb_starting_values=3,
1175 device=torch.device("cpu"),
1179 self.batch_size = batch_size
1180 self.device = device
1181 self.no_prog = no_prog
1185 nb_starting_values=nb_starting_values,
1186 nb_result_values_max=4 * nb_starting_values,
1187 max_input=max_input,
1191 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1196 nb_starting_values=nb_starting_values,
1197 nb_result_values_max=4 * nb_starting_values,
1198 max_input=max_input,
1202 for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1206 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1208 val_max = max([x if type(x) is int else 0 for x in symbols])
1209 symbols = list(filter(lambda x: type(x) is str, symbols))
1211 symbols += [str(n) for n in range(val_max + 1)]
1212 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1213 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1215 self.t_nul = self.token2id["<nul>"]
1216 self.t_input = self.token2id["<in>"]
1217 self.t_output = self.token2id["<out>"]
1218 self.t_prog = self.token2id["<prg>"]
1219 self.t_end = self.token2id["<end>"]
1221 self.train_input = self.tensorize(train_sequences)
1222 self.test_input = self.tensorize(test_sequences)
1225 # Excise the program from every train and test example
1226 k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
1230 ((self.train_input == self.t_prog).long() * k)
1231 .max(1, keepdim=True)
1234 self.train_input = (
1235 self.train_input * (k <= p).long()
1236 + self.t_end * (k == p + 1).long()
1237 + self.t_nul * (k > p + 1).long()
1239 k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
1243 ((self.test_input == self.t_prog).long() * k)
1244 .max(1, keepdim=True)
1248 self.test_input * (k <= p).long()
1249 + self.t_end * (k == p + 1).long()
1250 + self.t_nul * (k > p + 1).long()
1253 if logger is not None:
1254 logger(f"value_max {val_max}")
1255 for x in self.train_input[:25]:
1256 end = (x != self.t_nul).nonzero().max().item() + 1
1257 seq = [self.id2token[i.item()] for i in x[:end]]
1259 logger(f"example_seq {s}")
1261 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1263 def batches(self, split="train", nb_to_use=-1, desc=None):
1264 assert split in {"train", "test"}
1265 input = self.train_input if split == "train" else self.test_input
1267 input = input[:nb_to_use]
1269 desc = f"epoch-{split}"
1270 for batch in tqdm.tqdm(
1271 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1273 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1274 batch = batch[:, :last].to(self.device)
1277 def vocabulary_size(self):
1278 return self.nb_codes
1280 def produce_results(
1281 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1283 # --------------------------------------------------------------------
1284 def compute_nb_errors_prog(input, nb_to_log=0):
1285 result = input.clone()
1286 s = (result == self.t_prog).long()
1287 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1288 result = (1 - ar_mask) * result + ar_mask * self.t_nul
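# The mask is 1 strictly after the <prg> token: the program body is erased
# and regenerated, while the input/output examples that precede it are kept
# as the prompt.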
1290 masked_inplace_autoregression(
1295 deterministic_synthesis,
1299 sum_nb_total, sum_nb_errors = 0, 0
1300 for one_input, one_result in zip(input, result):
1301 seq = [self.id2token[i.item()] for i in one_result]
1302 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1304 sum_nb_errors += 0 if nb_errors == 0 else 1
1306 gt_seq = [self.id2token[i.item()] for i in one_input]
1307 _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1308 gt_prog = " ".join([str(x) for x in gt_prog])
1309 prog = " ".join([str(x) for x in prog])
1310 comment = "*" if nb_errors == 0 else "-"
1311 logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1312 for start_stack, target_stack, result_stack, correct in stacks:
1313 comment = "*" if correct else "-"
1314 start_stack = " ".join([str(x) for x in start_stack])
1315 target_stack = " ".join([str(x) for x in target_stack])
1316 result_stack = " ".join([str(x) for x in result_stack])
1318 f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1322 return sum_nb_total, sum_nb_errors
1324 # --------------------------------------------------------------------
1325 def compute_nb_errors_output(input, nb_to_log=0):
1326 result = input.clone()
1327 k = torch.arange(result.size(1), device=result.device)[None, :]
1329 ((result == self.t_output) * k).max(dim=1, keepdim=True).values
1332 ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
1334 ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
1335 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1337 masked_inplace_autoregression(
1342 deterministic_synthesis,
1346 sum_nb_total, sum_nb_errors = 0, 0
1347 for one_input, one_result, i, j in zip(
1348 input, result, last_output_idx, first_prog_idx
1350 seq = [self.id2token[i.item()] for i in one_result]
1352 correct = (one_input - one_result).abs().max() == 0
1353 sum_nb_errors += 0 if correct else 1
1356 self.id2token[i.item()] for i in one_result[i : j + 1]
1359 self.id2token[i.item()] for i in one_input[i : j + 1]
1361 comment = "*" if correct else "-"
1362 result_stack = " ".join([str(x) for x in result_stack])
1363 target_stack = " ".join([str(x) for x in target_stack])
1365 f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
1369 return sum_nb_total, sum_nb_errors
1371 # --------------------------------------------------------------------
1373 if not self.no_prog:
1374 test_nb_total, test_nb_errors = compute_nb_errors_prog(
1375 self.test_input[:1000].to(self.device), nb_to_log=10
1379 f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1382 logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
1384 test_nb_total, test_nb_errors = compute_nb_errors_output(
1385 self.test_input[:1000].to(self.device), nb_to_log=10
1389 f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1392 if save_attention_image is None:
1393 logger("no save_attention_image (is pycairo installed?)")
1395 ns = torch.randint(self.test_input.size(0), (1,)).item()
1396 input = self.test_input[ns : ns + 1].clone()
1397 last = (input != self.t_nul).max(0).values.nonzero().max() + 3
1398 input = input[:, :last].to(self.device)
1400 with torch.autograd.no_grad():
1403 model.record_attention(True)
1404 model(BracketedSequence(input))
1406 ram = model.retrieve_attention()
1407 model.record_attention(False)
1409 tokens_output = [self.id2token[i.item()] for i in input[0]]
1410 tokens_input = ["n/a"] + tokens_output[:-1]
1411 for n_head in range(ram[0].size(1)):
1412 filename = os.path.join(
1413 result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
1415 attention_matrices = [m[0, n_head] for m in ram]
1416 save_attention_image(
1422 # min_total_attention=0.9,
1426 logger(f"wrote {filename}")
1429 ######################################################################
1436 def tensorize(self, sequences):
1437 len_max = max([len(x) for x in sequences])
1442 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1459 device=torch.device("cpu"),
1463 self.batch_size = batch_size
1464 self.device = device
1466 train_sequences = expr.generate_sequences(
1468 nb_variables=nb_variables,
1469 length=sequence_length,
1470 operand_max=operand_max,
1471 result_max=result_max,
1474 test_sequences = expr.generate_sequences(
1476 nb_variables=nb_variables,
1477 length=sequence_length,
1478 operand_max=operand_max,
1479 result_max=result_max,
1482 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1485 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1486 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1488 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1490 self.train_input = self.tensorize(train_sequences)
1491 self.test_input = self.tensorize(test_sequences)
1493 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1495 def batches(self, split="train", nb_to_use=-1, desc=None):
1496 assert split in {"train", "test"}
1497 input = self.train_input if split == "train" else self.test_input
1499 input = input[:nb_to_use]
1501 desc = f"epoch-{split}"
1502 for batch in tqdm.tqdm(
1503 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1505 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1506 batch = batch[:, :last]
1509 def vocabulary_size(self):
1510 return self.nb_codes
1512 def seq2str(self, s):
1513 return "".join([self.id2char[k.item()] for k in s])
1515 def produce_results(
1521 deterministic_synthesis,
1524 def compute_nb_correct(input):
1525 result = input.clone()
1526 s = (result == self.space).long()
1527 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1528 result = (1 - ar_mask) * result + ar_mask * self.filler
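# The mask is 1 strictly after the first space: the expression is kept as the
# prompt and the rest of the sequence (presumably the variable values to
# predict) is erased and regenerated.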
1529 masked_inplace_autoregression(
1534 deterministic_synthesis,
1538 nb_total = input.size(0)
1539 nb_correct = (input == result).long().min(1).values.sum()
1541 #######################################################################
# Compute predicted vs. true variable values
1544 nb_delta = torch.zeros(5, dtype=torch.int64)
1547 values_input = expr.extract_results([self.seq2str(s) for s in input])
1548 values_result = expr.extract_results([self.seq2str(s) for s in result])
1550 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1552 with open(filename, "w") as f:
1553 for i, r in zip(values_input, values_result):
1554 for n, vi in i.items():
1556 f.write(f"{vi} {-1 if vr is None else vr}\n")
1558 if vr is None or vr < 0:
1562 if d >= nb_delta.size(0):
1567 ######################################################################
1569 return nb_total, nb_correct, nb_delta, nb_missed
1576 ) = compute_nb_correct(self.test_input[:10000])
1579 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1582 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1584 nb_total = test_nb_delta.sum() + test_nb_missed
1585 for d in range(test_nb_delta.size(0)):
1587 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1590 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1593 ##############################################################
1594 # Log a few generated sequences
1595 if input_file is None:
1596 input = self.test_input[:10]
1598 with open(input_file, "r") as f:
1599 sequences = [e.strip() for e in f.readlines()]
1600 sequences = [s + " " + "#" * 50 for s in sequences]
1601 input = self.tensorize(sequences)
1603 result = input.clone()
1604 s = (result == self.space).long()
1605 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1606 result = (1 - ar_mask) * result + ar_mask * self.filler
1608 for n in range(result.size(0)):
1609 logger(f"test_before {self.seq2str(result[n])}")
1611 masked_inplace_autoregression(
1616 deterministic_synthesis,
1620 correct = (1 - ar_mask) * self.space + ar_mask * input
1621 for n in range(result.size(0)):
1622 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1623 logger(f"test_after {self.seq2str(result[n])} {comment}")
1624 logger(f"truth {self.seq2str(correct[n])}")
1625 ##############################################################
1628 ######################################################################
1634 # Make a tensor from a list of strings
1635 def str2tensor(self, descr):
1636 token_descr = [s.strip().split(" ") for s in descr]
1637 l = max([len(s) for s in token_descr])
1638 token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
1639 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
1640 return torch.tensor(id_descr, device=self.device)
1642 # Make a list of strings from a tensor
1643 def tensor2str(self, x):
1644 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
# trim all the tensors in the tuple z to remove as many tokens as
# possible from the left and right ends of the first tensor. If z is a
# tuple, all its elements are trimmed according to the trimming of the
# first one.
1649 def trim(self, z, token="#"):
1650 n = self.token2id[token]
1651 if type(z) == tuple:
1653 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1654 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1655 return tuple([t[:, a:b] for t in z])
1657 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1658 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1661 ######################
1671 device=torch.device("cpu"),
1675 self.device = device
1676 self.batch_size = batch_size
1677 self.grid_factory = grid.GridFactory(size=size)
1678 self.fraction_play = fraction_play
1680 if logger is not None:
1682 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1685 self.train_descr = self.grid_factory.generate_samples(
1686 nb=nb_train_samples,
1687 fraction_play=fraction_play,
1688 progress_bar=lambda r: tqdm.tqdm(r),
1691 self.test_descr = self.grid_factory.generate_samples(
1692 nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
1695 if fraction_play > 0:
1696 self.play_descr = self.grid_factory.generate_samples(
1697 nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
1700 self.play_descr = []
1702 # Build the tokenizer
1704 for d in [self.train_descr, self.test_descr, self.play_descr]:
1706 for t in s.strip().split(" "):
# make this set a sorted list to get the same tensors given the same descriptions
1710 tokens = list(tokens)
1712 tokens = ["#"] + tokens
1713 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
1714 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
1715 self.t_nul = self.token2id["#"]
1716 self.t_true = self.token2id["true"]
1717 self.t_false = self.token2id["false"]
1718 # self.t_pipe = self.token2id["|"]
1720 # Tokenize the train and test sets
1721 self.train_input = self.str2tensor(self.train_descr)
1722 self.test_input = self.str2tensor(self.test_descr)
1724 None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
1727 def batches(self, split="train", nb_to_use=-1, desc=None):
1728 assert split in {"train", "test"}
1729 input = self.train_input if split == "train" else self.test_input
1730 for batch in tqdm.tqdm(
1731 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1733 yield self.trim(batch)
1735 def vocabulary_size(self):
1736 return len(self.token2id)
1738 def produce_results(
1739 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1741 correct = self.test_input[:1000]
1742 result = correct.clone()
1743 ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
1744 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1746 logger(f"----------------------------------------------------------")
1748 for e in self.tensor2str(result[:10]):
1749 logger(f"test_before {e}")
1751 masked_inplace_autoregression(
1756 deterministic_synthesis,
1760 logger(f"----------------------------------------------------------")
1762 for e in self.tensor2str(result[:10]):
1763 logger(f"test_after {e}")
1765 logger(f"----------------------------------------------------------")
1767 nb_total = ar_mask.sum().item()
1768 nb_correct = ((correct == result).long() * ar_mask).sum().item()
1770 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
1771 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
1773 if self.play_input is not None:
1774 result = self.play_input.clone()
1775 ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
1776 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1778 logger(f"----------------------------------------------------------")
1780 for e in self.tensor2str(result[:10]):
1781 logger(f"play_before {e}")
1783 masked_inplace_autoregression(
1788 deterministic_synthesis,
1792 logger(f"----------------------------------------------------------")
1794 for e in self.tensor2str(result[:10]):
1795 logger(f"play_after {e}")
1797 logger(f"----------------------------------------------------------")
1800 ######################################################################
1806 ######################
1815 device=torch.device("cpu"),
1819 self.device = device
1820 self.batch_size = batch_size
1821 self.nb_samples_per_mlp = 256
1823 if logger is not None:
1825 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1828 seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
1829 nb_mlps=nb_train_samples + nb_test_samples,
1830 nb_samples=self.nb_samples_per_mlp,
1834 nb_mlps_per_batch=1024,
1837 self.train_input = seq[:nb_train_samples]
1838 self.train_q_test_set = q_test_set[:nb_train_samples]
1839 self.train_ref_test_errors = test_error[:nb_train_samples]
1840 self.test_input = seq[nb_train_samples:]
1841 self.test_q_test_set = q_test_set[nb_train_samples:]
1842 self.test_ref_test_errors = test_error[nb_train_samples:]
1844 filename = os.path.join(result_dir, f"train_errors_ref.dat")
1845 with open(filename, "w") as f:
1846 for e in self.train_ref_test_errors:
1849 filename = os.path.join(result_dir, f"test_errors_ref.dat")
1850 with open(filename, "w") as f:
1851 for e in self.test_ref_test_errors:
1854 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1856 def batches(self, split="train", nb_to_use=-1, desc=None):
1857 assert split in {"train", "test"}
1858 input = self.train_input if split == "train" else self.test_input
1859 for batch in tqdm.tqdm(
1860 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1864 def vocabulary_size(self):
1865 return self.nb_codes
1867 def produce_results(
1868 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1870 correct = self.test_input[:1000]
1871 result = correct.clone()
1873 torch.arange(result.size(1), device=result.device)
1874 > self.nb_samples_per_mlp * 3 + 1
1876 ar_mask = ar_mask.expand_as(result)
1877 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1879 masked_inplace_autoregression(
1884 deterministic_synthesis,
1888 q_train_set = result[:, : self.nb_samples_per_mlp * 3]
1889 q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
1890 error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
1892 filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
1893 with open(filename, "w") as f:
1894 for e in error_test:
1898 ######################################################################
1915 device=torch.device("cpu"),
1919 self.batch_size = batch_size
1920 self.device = device
1922 self.world = greed.GreedWorld(height, width, T, nb_walls, nb_coins)
1924 states, actions, rewards = self.world.generate_episodes(
1925 nb_train_samples + nb_test_samples
1927 seq = self.world.episodes2seq(states, actions, rewards)
1928 self.train_input = seq[:nb_train_samples].to(self.device)
1929 self.test_input = seq[nb_train_samples:].to(self.device)
1931 def wipe_lookahead_rewards(self, batch):
1932 t = torch.arange(batch.size(1), device=batch.device)[None, :]
1933 u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
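# u is a per-sequence random cutoff; the lookahead-reward slots located at or
# before u are overwritten below with the REWARD_UNKNOWN code, everything
# else is left untouched.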
1934 lr_mask = (t <= u).long() * (
1935 t % self.world.it_len == self.world.index_lookahead_reward
1939 lr_mask * self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
1940 + (1 - lr_mask) * batch
1943 def batches(self, split="train", nb_to_use=-1, desc=None):
1944 assert split in {"train", "test"}
1945 input = self.train_input if split == "train" else self.test_input
1947 input = input[:nb_to_use]
1949 desc = f"epoch-{split}"
1950 for batch in tqdm.tqdm(
1951 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1953 yield self.wipe_lookahead_rewards(batch)
1955 def vocabulary_size(self):
1956 return self.world.nb_codes
1958 def thinking_autoregression(
1959 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
1963 def ar(result, ar_mask, logit_biases=None):
1964 ar_mask = ar_mask.expand_as(result)
1965 result *= 1 - ar_mask
1966 masked_inplace_autoregression(
1971 deterministic_synthesis=deterministic_synthesis,
1972 logit_biases=logit_biases,
1974 progress_bar_desc=None,
1976 warnings.warn("keeping thinking snapshots", RuntimeWarning)
1977 snapshots.append(result[:100].detach().clone())
1979 # Generate iteration after iteration
1981 result = self.test_input[:250].clone()
1982 # Erase all the content but that of the first iteration
1983 result[:, self.world.it_len :] = -1
# Set the lookahead_reward of the first iteration to UNKNOWN
1985 result[:, self.world.index_lookahead_reward] = self.world.lookahead_reward2code(
1986 greed.REWARD_UNKNOWN
1989 t = torch.arange(result.size(1), device=result.device)[None, :]
1992 range(0, result.size(1), self.world.it_len),
# Generate the next state but keep the initial one; the
# lookahead_rewards of the previous iterations are set to UNKNOWN
2000 :, u + self.world.index_lookahead_reward
2001 ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
2002 ar_mask = (t >= u + self.world.index_states).long() * (
2003 t < u + self.world.index_states + self.world.state_len
# Generate the action and reward with the lookahead_reward set to +1
2009 :, u + self.world.index_lookahead_reward
2010 ] = self.world.lookahead_reward2code(greed.REWARD_PLUS)
2011 ar_mask = (t >= u + self.world.index_reward).long() * (
2012 t <= u + self.world.index_action
2016 # Set the lookahead_reward to UNKNOWN for the next iterations
2018 :, u + self.world.index_lookahead_reward
2019 ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
2021 filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
2022 with open(filename, "w") as f:
2023 for n in range(snapshots[0].size(0)):
2025 lr, s, a, r = self.world.seq2episodes(
2028 str = self.world.episodes2str(
2029 lr, s, a, r, unicode=True, ansi_colors=True
2034 # Saving the generated sequences
2036 lr, s, a, r = self.world.seq2episodes(result)
2037 str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
2039 filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
2040 with open(filename, "w") as f:
2042 logger(f"wrote {filename}")
2044 def produce_results(
2045 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
2047 result = self.wipe_lookahead_rewards(self.test_input[:250].clone())
2049 # Saving the ground truth
2051 lr, s, a, r = self.world.seq2episodes(
2054 str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
2056 filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
2057 with open(filename, "w") as f:
2059 logger(f"wrote {filename}")
2061 # Re-generating from the first frame
2064 torch.arange(result.size(1), device=result.device) >= self.world.it_len
2066 ar_mask = ar_mask.expand_as(result)
2067 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
2069 masked_inplace_autoregression(
2074 deterministic_synthesis,
2078 # Saving the generated sequences
2080 lr, s, a, r = self.world.seq2episodes(
2083 str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
2085 filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
2086 with open(filename, "w") as f:
2088 logger(f"wrote {filename}")
2090 self.thinking_autoregression(
2091 n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
2095 ######################################################################
2096 ######################################################################
2102 def save_image(self, input, result_dir, filename, logger):
2103 img = world.sample2img(self.train_input.to("cpu"), self.height, self.width)
2104 image_name = os.path.join(result_dir, filename)
2105 torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=8, padding=2)
2106 logger(f"wrote {image_name}")
2115 device=torch.device("cpu"),
2119 self.batch_size = batch_size
2120 self.device = device
2124 self.train_input = world.generate(
2125 nb_train_samples, height=self.height, width=self.width
2127 self.train_ar_mask = (
2128 (torch.arange(self.train_input.size(1)) > self.train_input.size(1) // 2)
2130 .expand_as(self.train_input)
2133 self.test_input = world.generate(
2134 nb_test_samples, height=self.height, width=self.width
2136 self.test_ar_mask = (
2137 (torch.arange(self.test_input.size(1)) > self.test_input.size(1) // 2)
2139 .expand_as(self.test_input)
2142 self.train_input, self.train_ar_mask = self.train_input.to(
2144 ), self.train_ar_mask.to(device)
2145 self.test_input, self.test_ar_mask = self.test_input.to(
2147 ), self.test_ar_mask.to(device)
2149 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
2151 if result_dir is not None:
2153 self.train_input[:96], result_dir, f"world_train.png", logger
2156 def batches(self, split="train", nb_to_use=-1, desc=None):
2157 assert split in {"train", "test"}
2158 input = self.train_input if split == "train" else self.test_input
2160 input = input[:nb_to_use]
2162 desc = f"epoch-{split}"
2163 for batch in tqdm.tqdm(
2164 input.split(self.batch_size), dynamic_ncols=True, desc=desc
2168 def vocabulary_size(self):
2169 return self.nb_codes
2171 def produce_results(
2172 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
2174 def compute_accuracy(input, ar_mask, logger=None):
2175 input, ar_mask = input[:nmax], ar_mask[:nmax]
2176 result = input.clone() * (1 - ar_mask)
2178 masked_inplace_autoregression(
2183 deterministic_synthesis,
2184 progress_bar_desc=None,
2188 nb_total, nb_correct = (
2190 (input == result).long().min(dim=1).values.sum(),
2193 return nb_total, nb_correct
2195 train_nb_total, train_nb_correct = compute_accuracy(
2196 self.train_input, self.train_ar_mask
2200 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
2203 test_nb_total, test_nb_correct = compute_accuracy(
2204 self.test_input, self.test_ar_mask, logger
2208 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
2211 main_test_accuracy = test_nb_correct / test_nb_total
2212 logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
2214 ##############################
2216 input, ar_mask = self.test_input[:96], self.test_ar_mask[:96]
2217 result = input.clone() * (1 - ar_mask)
2219 masked_inplace_autoregression(
2224 deterministic_synthesis,
2225 progress_bar_desc=None,
2229 self.save_image(result, result_dir, f"world_result_{n_epoch:04d}.png", logger)
2231 return main_test_accuracy
2233 def store_new_quizzes(self, new_quizzes, for_train=True):
2234 input = self.train_input if for_train else self.test_input
2236 nb_current = input.size(0)
2237 nb_new = new_quizzes.size(0)
2238 if nb_new >= nb_current:
2239 input[...] = new_quizzes[:nb_current]
2241 nb_kept = nb_current - nb_new
2242 input[:nb_kept] = input[-nb_kept:].clone()
2243 input[nb_kept:] = new_quizzes
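# When fewer new quizzes than stored ones arrive, the first nb_new stored
# quizzes are discarded, the remaining ones are shifted to the front, and the
# new quizzes are written at the end.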
2245 def create_new_quizzes(self, n_epoch, result_dir, logger, nb, model, nb_runs):
2246 new_quizzes = torch.empty(
2247 nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
2249 ar_mask = torch.full(new_quizzes.size(), 1, device=self.device)
2251 masked_inplace_autoregression(
2256 deterministic_synthesis=False,
2257 progress_bar_desc="new quizzes",
2261 nb_correct = torch.empty(nb, device=self.device, dtype=torch.int64)
2264 range(new_quizzes.size(0)), dynamic_ncols=True, desc="checking quizzes"
2266 result = new_quizzes[n][None, :].expand(nb_runs, -1).clone()
2268 (torch.arange(result.size(1), device=self.device) > result.size(1) // 2)
2273 masked_inplace_autoregression(
2278 deterministic_synthesis=False,
2279 progress_bar_desc=None,
2284 (new_quizzes[n][None, :] == result).long().min(dim=1).values.sum()
2287 return new_quizzes, nb_correct