class TaskPicoCLVR(Task):
- def descr2tensor(self, descr):
+ def tensorize(self, descr):
t = [ [ self.token2id[u] for u in s ] for s in descr ]
return torch.tensor(t, device = self.device)
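Note: tensorize (formerly descr2tensor) maps every token of every description to its integer id and stacks the rows into one 2-D tensor, so all descriptions must already share a length. A minimal standalone sketch, with a toy token2id table standing in for the task's real vocabulary:

    import torch

    token2id = { '<nul>': 0, 'red': 1, 'above': 2, 'green': 3 }   # toy vocabulary

    def tensorize(descr, device = 'cpu'):
        # One row per description, one id per token.
        t = [ [ token2id[u] for u in s ] for s in descr ]
        return torch.tensor(t, device = device)

    print(tensorize([ [ 'red', 'above', 'green' ], [ 'green', 'above', 'red' ] ]))
    # tensor([[1, 2, 3],
    #         [3, 2, 1]])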
descr = [ s.strip().split(' ') for s in descr ]
l = max([ len(s) for s in descr ])
- #descr = [ [ '<unk>' ] * (l - len(s)) + s for s in descr ]
- descr = [ s + [ '<unk>' ] * (l - len(s)) for s in descr ]
+ #descr = [ [ '<nul>' ] * (l - len(s)) + s for s in descr ]
+ descr = [ s + [ '<nul>' ] * (l - len(s)) for s in descr ]
return descr
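Note: only the padding token changes here ('<unk>' -> '<nul>'); the logic still right-pads every description to the length of the longest one. Right-padding keeps the real tokens as a contiguous prefix, which is what prompting an autoregressive model requires. Self-contained sketch, assuming whitespace-tokenized strings:

    def pad_right(descr):
        descr = [ s.strip().split(' ') for s in descr ]
        l = max(len(s) for s in descr)
        # Append '<nul>' so every row reaches the same length.
        return [ s + [ '<nul>' ] * (l - len(s)) for s in descr ]

    print(pad_right([ 'red above green', 'blue' ]))
    # [['red', 'above', 'green'], ['blue', '<nul>', '<nul>']]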
self.id2token = dict([ (n, t) for n, t in enumerate(tokens) ])
# Tokenize the train and test sets
- self.train_input = descr2tensor(self.train_descr)
- self.test_input = descr2tensor(self.test_descr)
+ self.train_input = self.tensorize(self.train_descr)
+ self.test_input = self.tensorize(self.test_descr)
def batches(self, split = 'train'):
assert split in { 'train', 'test' }
def vocabulary_size(self):
return len(self.token2id)
- def generate(self, primer_descr, model, nb_tokens):
- results = autoregression(
- model, self.batch_size,
- nb_samples = 1, nb_tokens = nb_tokens, primer = descr2tensor(primer_descr),
- device = self.device
- )
- return ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
-
def produce_results(self, n_epoch, model):
nb_tokens = self.height * self.width + 3
result_descr = [ ]
for primer_descr in [ ... ]:
for k in range(nb_per_primer):
- result_descr.append(self.generate(primer_descr, model, nb_tokens))
+ results = autoregression(
+ model, self.batch_size,
+ nb_samples = 1, nb_tokens = nb_tokens,
+ primer = self.tensorize(primer_descr),
+ device = self.device
+ )
+ r = ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
+ result_descr.append(r)
+
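Note: the autoregression helper itself is not part of this diff, so the sketch below is only a plausible reading of its contract (parameter names mirror the call site; the real implementation may differ). It assumes the model maps a (batch, length) tensor of ids to per-position logits and extends the primer by sampling one token at a time:

    import torch

    def autoregression(model, batch_size, nb_samples, nb_tokens, primer, device):
        # batch_size would matter for chunking many samples; ignored in this sketch.
        results = primer.to(device).expand(nb_samples, -1).clone()
        while results.size(1) < nb_tokens:
            logits = model(results)[:, -1]    # logits for the next position
            t_next = torch.distributions.Categorical(logits = logits).sample()
            results = torch.cat([ results, t_next[:, None] ], 1)
        return results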
- img = [ picoclvr.descr2img(d, height = self.height, width = self.width)
-         for d in result_descr ]
+ img = [
+     picoclvr.descr2img(d, height = self.height, width = self.width)
+     for d in result_descr
+ ]
img = torch.cat(img, 0)
image_name = f'result_picoclvr_{n_epoch:04d}.png'
torchvision.utils.save_image(
self.vocab = torchtext.vocab.build_vocab_from_iterator(
yield_tokens(),
- specials = [ '<unk>', '<non>' ],
+ specials = [ '<unk>', '<nul>' ],
min_freq = self.min_freq
)
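Note: build_vocab_from_iterator counts tokens from the iterator and assigns ids, placing the specials first; '<nul>' replaces '<non>' as the padding/termination token throughout. Minimal usage sketch with a stand-in corpus:

    from torchtext.vocab import build_vocab_from_iterator

    def yield_tokens():
        for s in [ 'red above green', 'green above blue' ]:   # stand-in corpus
            yield s.split(' ')

    vocab = build_vocab_from_iterator(
        yield_tokens(),
        specials = [ '<unk>', '<nul>' ],
        min_freq = 1,
    )
    vocab.set_default_index(vocab['<unk>'])      # out-of-vocabulary -> '<unk>'
    print(vocab([ 'red', '<nul>', 'purple' ]))   # ids; 'purple' maps to '<unk>' (0)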
def tensorize(self, s):
a = max(len(x) for x in s)
- return torch.tensor([ self.vocab(x + [ '<non>' ] * (a - len(x))) for x in s ])
+ return torch.tensor([ self.vocab(x + [ '<nul>' ] * (a - len(x))) for x in s ])
def yield_batches(self, ds):
s = [ ]
else:
t_next = logits.argmax()
t_generated.append(self.vocab.lookup_token(t_next))
- if t_generated[-1] == '<non>': break
+ if t_generated[-1] == '<nul>': break
s = ' '.join(t_generated)
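Note: the generation loop is only partially visible in this hunk; it appends one token at a time and stops as soon as the model emits '<nul>', which now doubles as the end-of-sequence marker. A hedged reconstruction of the loop shape, assuming the model returns per-position logits and input is a (1, length) tensor of ids:

    import torch

    def generate(model, vocab, input, max_len = 50, deterministic = True):
        t_generated = [ ]
        for _ in range(max_len):
            logits = model(input)[0, -1]     # logits for the next token
            if deterministic:
                t_next = logits.argmax()
            else:
                t_next = torch.distributions.Categorical(logits = logits).sample()
            t_generated.append(vocab.lookup_token(int(t_next)))
            if t_generated[-1] == '<nul>': break
            input = torch.cat([ input, t_next.reshape(1, 1) ], 1)
        return ' '.join(t_generated)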