-######################################################################
-
-import picoclvr
-
-
class PicoCLVR(Task):
    """Scene-description task built on the picoclvr synthetic dataset.

    Each sample is a token sequence of the form
    ``<properties> <img> <pixel tokens>``; models are evaluated both on
    completing the property part and on generating the image part.
    """

    # Make a tensor from a list of strings, right-padding every row with
    # "<nul>" so all rows share the length of the longest description.
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        max_len = max(len(s) for s in token_descr)
        token_descr = [s + ["<nul>"] * (max_len - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor of token ids.
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]

    # Trim the columns that contain only `token` from the left and right
    # of the tensor. If z is a tuple, the trimming computed on its first
    # element is applied to every element.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        x = z[0] if isinstance(z, tuple) else z
        # Columns equal to n on every row (padded so a fully-nul tensor
        # still yields valid boundaries), cumsum'ed to locate the first
        # and last non-nul column.
        i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
        a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
        if isinstance(z, tuple):
            return tuple([t[:, a:b] for t in z])
        return z[:, a:b]

    ######################

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_colors=5,
        logger=None,
        device=torch.device("cpu"),
        pruner_train=None,
        pruner_eval=None,
    ):
        """Generate the train/test descriptions, build the tokenizer, and
        tensorize both splits.

        Args:
            nb_train_samples / nb_test_samples: number of descriptions to
                generate for each split.
            batch_size: mini-batch size used by batches() and evaluation.
            height, width: picture grid dimensions passed to picoclvr.
            nb_colors: number of distinct colors in generated scenes.
            logger: optional callable taking a string, used for progress
                and metric lines.
            device: torch device tensors are created on.
            pruner_train: optional predicate used to filter the training
                descriptions (test descriptions are never pruned here).
            pruner_eval: optional predicate used for an extra pruned
                evaluation pass in produce_results().
        """
        super().__init__()

        def generate_descr(nb, cache_suffix, pruner):
            # cache_suffix is kept for interface stability; generation is
            # not cached in this implementation.
            return picoclvr.generate(
                nb,
                height=self.height,
                width=self.width,
                nb_colors=nb_colors,
                pruner=pruner,
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given
        # the same descr
        tokens = sorted(tokens)
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        """Yield trimmed mini-batches from the requested split."""
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        """Regenerate the image part of every test sample and log how many
        requested properties the model's pictures miss.

        Args:
            n_epoch: epoch index, only used in the log lines.
            model: autoregressive model passed to
                masked_inplace_autoregression.
            logger: callable taking a string.
            deterministic_synthesis: forwarded to the synthesis routine.
            pruner: optional property filter; when set, the metric names
                are prefixed with "pruned_".
        """
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc="test-properties",
        ):
            result = input.clone()
            # Mask everything from the <img> token onward and let the
            # model regenerate it autoregressively.
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            result_descr = self.detensorize(result)
            nb_props = picoclvr.nb_properties(
                result_descr,
                height=self.height,
                width=self.width,
                pruner=pruner,
            )
            nb_requested_properties, _, nb_missing_properties = zip(*nb_props)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        """Run the property-completion evaluation, then generate demo
        pictures from a few fixed primer descriptions and save them as a
        PNG grid in result_dir."""
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            # Bug fix: the pruner used to be passed in the logger slot
            # with deterministic_synthesis missing, which raised a
            # TypeError whenever pruner_eval was set.
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_per_primer = 8
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        # Append room for the image tokens (height*width pixels plus the
        # terminating token), all initialized to <nul> so the ar_mask
        # covers exactly the part to synthesize.
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        result_descr = self.detensorize(result)

        nb_props = picoclvr.nb_properties(
            result_descr, height=self.height, width=self.width
        )

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*nb_props)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "demo_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        if img.dim() == 5:
            if img.size(1) == 1:
                # Single image per description: drop the extra dim and
                # add a 1-pixel gray frame.
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
                        for x in img
                    ],
                    0,
                )

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")
-
-
-######################################################################
-
-
-class MNIST(Task):
- def __init__(
- self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
- ):
- super().__init__()