X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=tasks.py;h=49b83ecd3c4a1c6b5763ed41e7f9a97d5c40de1f;hb=d16410119a4e5c1117f7f0fbbe80e3e54f81f28b;hp=443419eb340704273b64152edadb1286aae50cbf;hpb=798d9526e726b644979cf1124e714f705fdd5966;p=culture.git

diff --git a/tasks.py b/tasks.py
index 443419e..49b83ec 100755
--- a/tasks.py
+++ b/tasks.py
@@ -2093,3 +2093,195 @@ class Greed(Task):
 ######################################################################
+######################################################################
+
+import world
+
+
+class World(Task):
+    def save_image(self, input, result_dir, filename, logger):
+        img = world.sample2img(input.to("cpu"), self.height, self.width)
+        image_name = os.path.join(result_dir, filename)
+        torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=8, padding=2)
+        logger(f"wrote {image_name}")
+
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        result_dir=None,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = 6
+        self.width = 8
+
+        self.train_input = world.generate(
+            nb_train_samples, height=self.height, width=self.width
+        )
+        self.train_ar_mask = (
+            (torch.arange(self.train_input.size(1)) > self.train_input.size(1) // 2)
+            .long()[None, :]
+            .expand_as(self.train_input)
+        )
+
+        self.test_input = world.generate(
+            nb_test_samples, height=self.height, width=self.width
+        )
+        self.test_ar_mask = (
+            (torch.arange(self.test_input.size(1)) > self.test_input.size(1) // 2)
+            .long()[None, :]
+            .expand_as(self.test_input)
+        )
+
+        self.train_input, self.train_ar_mask = self.train_input.to(
+            device
+        ), self.train_ar_mask.to(device)
+        self.test_input, self.test_ar_mask = self.test_input.to(
+            device
+        ), self.test_ar_mask.to(device)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+        if result_dir is not None:
+            self.save_image(
+                self.train_input[:96], result_dir, f"world_train.png", logger
+            )
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        def compute_accuracy(input, ar_mask, logger=None):
+            input, ar_mask = input[:nmax], ar_mask[:nmax]
+            result = input.clone() * (1 - ar_mask)
+
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                progress_bar_desc=None,
+                device=self.device,
+            )
+
+            nb_total, nb_correct = (
+                input.size(0),
+                (input == result).long().min(dim=1).values.sum(),
+            )
+
+            return nb_total, nb_correct
+
+        train_nb_total, train_nb_correct = compute_accuracy(
+            self.train_input, self.train_ar_mask
+        )
+
+        logger(
+            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+        )
+
+        test_nb_total, test_nb_correct = compute_accuracy(
+            self.test_input, self.test_ar_mask, logger
+        )
+
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
+
+        main_test_accuracy = test_nb_correct / test_nb_total
+        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+
+        ##############################
+
+        input, ar_mask = self.test_input[:96], self.test_ar_mask[:96]
+        result = input.clone() * (1 - ar_mask)
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            progress_bar_desc=None,
+            device=self.device,
+        )
+
+        self.save_image(result, result_dir, f"world_result_{n_epoch:04d}.png", logger)
+
+        return main_test_accuracy
+
+    def store_new_quizzes(self, new_quizzes, for_train=True):
+        input = self.train_input if for_train else self.test_input
+
+        nb_current = input.size(0)
+        nb_new = new_quizzes.size(0)
+        if nb_new >= nb_current:
+            input[...] = new_quizzes[:nb_current]
+        else:
+            nb_kept = nb_current - nb_new
+            input[:nb_kept] = input[-nb_kept:].clone()
+            input[nb_kept:] = new_quizzes
+
+    def create_new_quizzes(self, n_epoch, result_dir, logger, nb, model, nb_runs):
+        new_quizzes = torch.empty(
+            nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
+        )
+        ar_mask = torch.full(new_quizzes.size(), 1, device=self.device)
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            new_quizzes,
+            ar_mask,
+            deterministic_synthesis=False,
+            progress_bar_desc="new quizzes",
+            device=self.device,
+        )
+
+        nb_correct = torch.empty(nb, device=self.device, dtype=torch.int64)
+
+        for n in tqdm.tqdm(
+            range(new_quizzes.size(0)), dynamic_ncols=True, desc="checking quizzes"
+        ):
+            result = new_quizzes[n][None, :].expand(nb_runs, -1).clone()
+            ar_mask = (
+                (torch.arange(result.size(1), device=self.device) > result.size(1) // 2)
+                .long()[None, :]
+                .expand_as(result)
+            )
+
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis=False,
+                progress_bar_desc=None,
+                device=self.device,
+            )
+
+            nb_correct[n] = (
+                (new_quizzes[n][None, :] == result).long().min(dim=1).values.sum()
+            )
+
+        return new_quizzes, nb_correct
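Usage sketch (not part of the commit above): one possible way a caller could combine create_new_quizzes and store_new_quizzes into a self-refreshing training set. The trained model, the log_string function, the sample counts, and the keep threshold are assumptions made for illustration, not taken from the repository.

import torch

import tasks

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# `log_string` is an assumed logging callable; World calls it with plain strings.
task = tasks.World(
    nb_train_samples=25000,
    nb_test_samples=1000,
    batch_size=25,
    result_dir="results",
    logger=log_string,
    device=device,
)

# Sample candidate quizzes with an assumed trained autoregressive `model`,
# then re-solve each one nb_runs times to measure how reliably it is solved.
nb_runs = 10
new_quizzes, nb_correct = task.create_new_quizzes(
    n_epoch=0,
    result_dir="results",
    logger=log_string,
    nb=1000,
    model=model,
    nb_runs=nb_runs,
)

# Keep quizzes solved in at least nb_runs - 1 of the runs (arbitrary threshold),
# then inject them into the training set for the next epoch.
to_keep = new_quizzes[nb_correct >= nb_runs - 1]
log_string(f"keeping {to_keep.size(0)} / {new_quizzes.size(0)} new quizzes")
task.store_new_quizzes(to_keep, for_train=True)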