        id_descr = [ [ self.token2id[u] for u in s ] for s in token_descr ]
        return torch.tensor(id_descr, device = self.device)
+    def trim(self, x, token = '<nul>'):
+        n = self.token2id[token]
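+        # Flag the columns that contain at least one non-`token`
+        # entry and keep only the range from the first to the last of
+        # them, stripping the all-`token` columns at both ends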
+        i = (1 - (F.pad(x, (1, 1), value = n) == n).min(0).values.long()).cumsum(0)
+        a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
+        return x[:, a:b]
+
    def __init__(self, batch_size,
                 height, width, nb_colors = 5,
                 device = torch.device('cpu')):
        assert split in { 'train', 'test' }
        input = self.train_input if split == 'train' else self.test_input
        for batch in tqdm.tqdm(input.split(self.batch_size), desc = f'epoch-{split}'):
-            yield batch
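+            # strip the all-<nul> columns at both ends of the batch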
+            yield self.trim(batch)
    def vocabulary_size(self):
        return len(self.token2id)
    def produce_results(self, n_epoch, model):
-        nb_tokens = self.height * self.width + 3
+        nb_tokens_to_generate = self.height * self.width + 3
        result_descr = [ ]
        nb_per_primer = 8
            'green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top <img>',
        ]:
-            for k in range(nb_per_primer):
-                results = autoregression(
-                    model, self.batch_size,
-                    nb_samples = 1, nb_tokens_to_generate = nb_tokens,
-                    primer = self.tensorize([ primer_descr ]),
-                    device = self.device
-                )
-                r = ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
-                result_descr.append(r)
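+            # Generate all the samples for this primer in a single
+            # batched call, expanding the primer to nb_per_primer rows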
+            results = autoregression(
+                model,
+                self.batch_size,
+                nb_samples = nb_per_primer,
+                nb_tokens_to_generate = nb_tokens_to_generate,
+                primer = self.tensorize([ primer_descr ]).expand(nb_per_primer, -1),
+                device = self.device
+            )
+
+            l = [ ' '.join([ self.id2token[t.item()] for t in r ]) for r in results ]
+            result_descr += l
+
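+        # Count, for every generated description, the requested
+        # properties and those missing from the resulting image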
+        np = picoclvr.nb_properties(
+            result_descr,
+            height = self.height, width = self.width
+        )
+
+        nb_requested_properties, _, nb_missing_properties = zip(*np)
+
+        log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
        img = [
            picoclvr.descr2img(d, height = self.height, width = self.width)
        )
        log_string(f'wrote {image_name}')
-        np = picoclvr.nb_properties(
-            result_descr,
-            height = self.height, width = self.width
-        )
-
-        nb_requested_properties, _, nb_missing_properties = zip(*np)
-
-        log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
-
######################################################################
class TaskWiki103(Task):
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
-#log_string(f'train set perplexity {train_set_perplexity}')
for k in range(nb_epochs_finished, nb_epochs):
        for _ in range(nb_blocks):
            trunk_blocks += [
                Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
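+                    # normalized_shape passed explicitly as a tuple;
+                    # equivalent to passing the integer dim_model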
                    QKVAttention(
                        dim_in = dim_model,
                        dim_qk = dim_keys,
                    ),
                ),
                Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
                    nn.Linear(in_features = dim_model, out_features = dim_hidden),
                    nn.ReLU(),
                    nn.Linear(in_features = dim_hidden, out_features = dim_model),
        x = self.embedding(x)
        x = self.trunk(x)
        x = self.readout(x)
-        return x[:, :-1]
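+        # the negative padding on the next-to-last dimension trims the
+        # last time step, equivalent to returning x[:, :-1, :]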
+        x = F.pad(x, (0, 0, 0, -1))
+        return x
######################################################################