def autoregression(
model, batch_size,
- nb_samples, nb_tokens_to_generate, starting_input = None,
+ nb_samples, nb_tokens_to_generate, primer = None,
device = torch.device('cpu')
):
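    # Generate nb_samples sequences of nb_tokens_to_generate tokens with the model,
    # optionally continued from a primer of shape (nb_samples, T), one position at a time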
results = torch.zeros(
    nb_samples, nb_tokens_to_generate,
    dtype = torch.int64, device = device
)
- if starting_input is None:
+ if primer is None:
first = 0
else:
- first = starting_input.size(1)
- results = torch.cat((starting_input, results), 1)
+ first = primer.size(1)
+ results = torch.cat((primer, results), 1)
for input in results.split(batch_size):
for s in tqdm.tqdm(range(first, input.size(1)), desc = 'synth'):
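    # Sketch of the per-position step assumed here, following the sampling scheme of the
    # generate() method removed below (args.synthesis_sampling picks categorical sampling,
    # otherwise the argmax of the logits at position s is taken):
    output = model(input)
    logits = output[:, s]
    if args.synthesis_sampling:
        dist = torch.distributions.categorical.Categorical(logits = logits)
        t_next = dist.sample()
    else:
        t_next = logits.argmax(1)
    input[:, s] = t_next
# split() returns views, so writing into input fills the corresponding rows of results
return results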
class TaskPicoCLVR(Task):
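+ # Map a list of token sequences to a (N, T) tensor of token ids on self.device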
+ def descr2tensor(self, descr):
+ t = [ [ self.token2id[u] for u in s ] for s in descr ]
+ return torch.tensor(t, device = self.device)
+
def __init__(self, batch_size,
height, width, nb_colors = 5,
device = torch.device('cpu')):
self.token2id = dict([ (t, n) for n, t in enumerate(tokens) ])
self.id2token = dict([ (n, t) for n, t in enumerate(tokens) ])
# Tokenize the train and test sets
- t = [ [ self.token2id[u] for u in s ] for s in self.train_descr ]
- self.train_input = torch.tensor(t, device = self.device)
- t = [ [ self.token2id[u] for u in s ] for s in self.test_descr ]
- self.test_input = torch.tensor(t, device = self.device)
+ self.train_input = self.descr2tensor(self.train_descr)
+ self.test_input = self.descr2tensor(self.test_descr)
def batches(self, split = 'train'):
assert split in { 'train', 'test' }
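    # Sketch of the body assumed here: yield the pre-tokenized split in mini-batches,
    # as the training loop and the entropy estimate below expect
    input = self.train_input if split == 'train' else self.test_input
    for batch in tqdm.tqdm(input.split(self.batch_size), desc = f'epoch-{split}'):
        yield batch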
def vocabulary_size(self):
return len(self.token2id)
- def generate(self, primer, model, nb_tokens):
- t_primer = primer.strip().split(' ')
- t_generated = [ ]
-
- for j in range(nb_tokens):
- t = [ [ self.token2id[u] for u in t_primer + t_generated ] ]
- input = torch.tensor(t, device = self.device)
- input = F.pad(input, (0, 1)) # Add the next token, the one to predict
- output = model(input)
- logits = output[0, -1]
- if args.synthesis_sampling:
- dist = torch.distributions.categorical.Categorical(logits = logits)
- t_next = dist.sample()
- else:
- t_next = logits.argmax()
- t_generated.append(self.id2token[t_next.item()])
-
- return ' '.join(t_primer + t_generated)
+ def generate(self, descr_primer, model, nb_tokens):
+ results = autoregression(
+ model, self.batch_size,
+ 1, nb_tokens, primer = self.descr2tensor([ descr_primer.strip().split(' ') ]),
+ device = self.device
+ )
+ return ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
def produce_results(self, n_epoch, model, nb_tokens = None):
if nb_tokens is None:
nb_tokens = self.height * self.width + 3
- descr = [ ]
+ result_descr = [ ]
nb_per_primer = 8
- for primer in [
+ for descr_primer in [
'red above green <sep> green top <sep> blue right of red <img>',
'there is red <sep> there is yellow <sep> there is blue <img>',
'red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left <img>',
]:
for k in range(nb_per_primer):
- descr.append(self.generate(primer, model, nb_tokens))
+ result_descr.append(self.generate(descr_primer, model, nb_tokens))
- img = [ picoclvr.descr2img(d, height = self.height, width = self.width) for d in descr ]
+ img = [ picoclvr.descr2img(d, height = self.height, width = self.width)
+ for d in result_descr ]
img = torch.cat(img, 0)
image_name = f'result_picoclvr_{n_epoch:04d}.png'
torchvision.utils.save_image(
    img / 255., image_name, nrow = nb_per_primer # arguments assumed: 0..255 pixels, one grid row per primer
)
log_string(f'wrote {image_name}')
np = picoclvr.nb_properties(
- descr,
+ result_descr,
height = self.height, width = self.width
)
nb_requested_properties, _, nb_missing_properties = zip(*np)
- log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(descr):.02f}')
+ log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
######################################################################
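# Estimate the empirical token distribution of the training set (the zero
# initialization of token_count is assumed; the first += broadcasts it into a
# per-token-id count vector)
token_count = 0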
for input in task.batches(split = 'train'):
token_count += F.one_hot(input, num_classes = task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
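# The exponential of this entropy is the perplexity of a memoryless model matching the
# empirical token frequencies, a reference point for the per-epoch perplexities logged below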
-h = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(h)
-log_string(f'train set perplexity {train_set_perplexity}')
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
+#log_string(f'train set perplexity {train_set_perplexity}')
for k in range(nb_epochs_finished, nb_epochs):
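    # Sketch of the per-epoch pass assumed in the elided code: one optimization epoch on
    # the train split and one evaluation pass on the test split, accumulating the mean
    # per-sample cross-entropy (model.train()/eval() switching, optimizer creation and
    # checkpointing are not shown in this diff, and the names below are assumptions)
    acc_train_loss, nb_train_samples = 0.0, 0
    for input in task.batches(split = 'train'):
        output = model(input)
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    acc_test_loss, nb_test_samples = 0.0, 0
    with torch.no_grad():
        for input in task.batches(split = 'test'):
            output = model(input)
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)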
train_perplexity = math.exp(min(100, acc_train_loss/nb_train_samples))
test_perplexity = math.exp(min(100, acc_test_loss/nb_test_samples))
- log_string(f'perplexity {k} train {train_perplexity} test {test_perplexity}')
+ log_string(f'perplexity {k} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}')
task.produce_results(k, model)