parser.add_argument('--log_filename',
type = str, default = 'train.log')
-parser.add_argument('--download',
- action='store_true', default = False)
-
parser.add_argument('--seed',
type = int, default = 0)
parser.add_argument('--nb_epochs',
- type = int, default = 100)
+ type = int, default = -1)
parser.add_argument('--batch_size',
type = int, default = 25)
##############################
# picoclvr options
-parser.add_argument('--picoclvr_many_colors',
- action='store_true', default = False)
+parser.add_argument('--picoclvr_nb_colors',
+ type = int, default = 5)
parser.add_argument('--picoclvr_height',
type = int, default = 12)
######################################################################
+def autoregression(
+ model,
+ nb_samples, nb_tokens_to_generate, starting_input = None,
+ device = torch.device('cpu')
+):
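+    # Generate nb_samples sequences of nb_tokens_to_generate new tokens,
+    # optionally continuing the per-sample prompts given in starting_input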
+ results = torch.zeros(
+ nb_samples, nb_tokens_to_generate,
+ dtype = torch.int64, device = device
+ )
+
+ if starting_input is None:
+ first = 0
+ else:
+ first = starting_input.size(1)
+ results = torch.cat((starting_input, results), 1)
+
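+    # split() returns views into results, so the tokens written into
+    # input below land directly in results, one batch at a time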
+ for input in results.split(args.batch_size):
+ for s in tqdm.tqdm(range(first, input.size(1)), desc = 'synth'):
+ output = model(input)
+ logits = output[:, s]
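+            # Either sample the next token from the predicted
+            # distribution, or take the most likely one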
+ if args.synthesis_sampling:
+ dist = torch.distributions.categorical.Categorical(logits = logits)
+ t_next = dist.sample()
+ else:
+ t_next = logits.argmax(1)
+ input[:, s] = t_next
+
+ return results
+
+######################################################################
+
class Task:
def batches(self, split = 'train'):
pass
class TaskPicoCLVR(Task):
def __init__(self, batch_size,
- height, width, many_colors = False,
+ height, width, nb_colors = 5,
device = torch.device('cpu')):
def generate_descr(nb):
descr = picoclvr.generate(
nb,
height = self.height, width = self.width,
- many_colors = many_colors
+ nb_colors = nb_colors
)
descr = [ s.strip().split(' ') for s in descr ]
self.train_descr = generate_descr((nb * 4) // 5)
self.test_descr = generate_descr((nb * 1) // 5)
+ # Build the tokenizer
tokens = set()
for d in [ self.train_descr, self.test_descr ]:
for s in d:
for j in range(nb_tokens):
t = [ [ self.token2id[u] for u in t_primer + t_generated ] ]
input = torch.tensor(t, device = self.device)
+ input = F.pad(input, (0, 1)) # Add the next token, the one to predict
output = model(input)
logits = output[0, -1]
if args.synthesis_sampling:
dist = torch.distributions.categorical.Categorical(logits = logits)
- t = dist.sample()
+ t_next = dist.sample()
else:
- t = logits.argmax()
- t_generated.append(self.id2token[t.item()])
+ t_next = logits.argmax()
+ t_generated.append(self.id2token[t_next.item()])
return ' '.join(t_primer + t_generated)
img = [ picoclvr.descr2img(d, height = self.height, width = self.width) for d in descr ]
img = torch.cat(img, 0)
- file_name = f'result_picoclvr_{n_epoch:04d}.png'
+ image_name = f'result_picoclvr_{n_epoch:04d}.png'
torchvision.utils.save_image(
img / 255.,
- file_name, nrow = nb_per_primer, pad_value = 0.8
+ image_name, nrow = nb_per_primer, pad_value = 0.8
)
- log_string(f'wrote {file_name}')
+ log_string(f'wrote {image_name}')
nb_missing = sum( [
x[2] for x in picoclvr.nb_missing_properties(
for j in range(nb_tokens):
input = self.tensorize([ t_primer + t_generated ]).to(self.device)
+ input = F.pad(input, (0, 1)) # Add the next token, the one to predict
output = model(input)
logits = output[0, -1]
if args.synthesis_sampling:
dist = torch.distributions.categorical.Categorical(logits = logits)
- t = dist.sample()
+ t_next = dist.sample()
else:
- t = logits.argmax()
- t_generated.append(self.vocab.lookup_token(t))
+ t_next = logits.argmax()
+            t_generated.append(self.vocab.lookup_token(t_next.item()))
if t_generated[-1] == '<non>': break
s = ' '.join(t_generated)
return 256
def produce_results(self, n_epoch, model, nb_samples = 64):
- results = torch.zeros(nb_samples, 28 * 28, dtype = torch.int64, device = self.device)
- for input in results.split(self.batch_size):
- for s in tqdm.tqdm(range(input.size(1) - 1), desc = 'synth'):
- output = model(input)
- logits = output[:, s]
- if args.synthesis_sampling:
- dist = torch.distributions.categorical.Categorical(logits = logits)
- t = dist.sample()
- else:
- t = logits.argmax(1)
- input[:, s + 1] = t
-
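+        # Generate the images as flat sequences of 28 * 28 pixel values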
+ results = autoregression(model, nb_samples, 28 * 28, device = self.device)
image_name = f'result_mnist_{n_epoch:04d}.png'
torchvision.utils.save_image(1 - results.reshape(-1, 1, 28, 28) / 255.,
image_name, nrow = 16, pad_value = 0.8)
######################################################################
-def check_causality(model):
- #m = model[1:]
- input = torch.rand(1, 5, dim_model).requires_grad_()
- output = m(input)
- a = torch.zeros(output.size(1), input.size(1))
- for k in range(output.size(1)):
- for d in range(output.size(2)):
- g, = torch.autograd.grad(output[0, k, d], input, retain_graph = True)
- a[k] += g.squeeze(0).pow(2).sum(1)
- print(a)
-
-######################################################################
-
log_string(f'device {device}')
if args.data == 'wiki103':
+ nb_epochs_default = 10
task = TaskWiki103(batch_size = args.batch_size, device = device)
elif args.data == 'mnist':
+ nb_epochs_default = 25
task = TaskMNIST(batch_size = args.batch_size, device = device)
elif args.data == 'picoclvr':
+ nb_epochs_default = 10
task = TaskPicoCLVR(batch_size = args.batch_size,
height = args.picoclvr_height,
width = args.picoclvr_width,
- many_colors = args.picoclvr_many_colors,
+ nb_colors = args.picoclvr_nb_colors,
device = device)
else:
raise ValueError(f'Unknown dataset {args.data}.')
nb_epochs_finished = 0
if args.no_checkpoint:
- log_string(f'Not trying to load checkpoint.')
+    log_string('not trying to load checkpoint.')
else:
try:
nb_epochs_finished = checkpoint['nb_epochs_finished']
model.load_state_dict(checkpoint['model_state'])
optimizer.load_state_dict(checkpoint['optimizer_state'])
- log_string(f'Checkpoint loaded with {nb_epochs_finished} epochs finished.')
+ log_string(f'checkpoint loaded with {nb_epochs_finished} epochs finished.')
except FileNotFoundError:
- log_string('Starting from scratch.')
+ log_string('starting from scratch.')
except:
- log_string('Error when loading the checkpoint.')
+ log_string('error when loading the checkpoint.')
exit(1)
######################################################################
-for k in range(nb_epochs_finished, args.nb_epochs):
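+# If --nb_epochs is not set to a positive value, use the default
+# for the selected dataset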
+nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
+
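+# Empirical token distribution of the training set; the exp of its
+# entropy is the perplexity of the best memoryless model, a reference
+# point for the perplexities logged during training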
+token_count = 0
+for input in task.batches(split = 'train'):
+ token_count += F.one_hot(input, num_classes = task.vocabulary_size()).sum((0, 1))
+token_probas = token_count / token_count.sum()
+h = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(h)
+log_string(f'train set perplexity {train_set_perplexity}')
+
+for k in range(nb_epochs_finished, nb_epochs):
model.train()
for input in task.batches(split = 'train'):
input = input.to(device)
output = model(input)
- loss = F.cross_entropy(output[:, :-1].transpose(1, 2), input[:, 1:])
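+        # The model's output at position s is its prediction for
+        # input[:, s] (cf. autoregression() above), so the loss is
+        # computed without shifting input and output against each other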
+ loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
for input in task.batches(split = 'test'):
input = input.to(device)
output = model(input)
- loss = F.cross_entropy(output[:, :-1].transpose(1, 2), input[:, 1:])
+ loss = F.cross_entropy(output.transpose(1, 2), input)
acc_test_loss += loss.item() * input.size(0)
nb_test_samples += input.size(0)
train_perplexity = math.exp(min(100, acc_train_loss/nb_train_samples))
test_perplexity = math.exp(min(100, acc_test_loss/nb_test_samples))
- log_string(f'perplexity {k+1} train {train_perplexity} test {test_perplexity}')
+ log_string(f'perplexity {k} train {train_perplexity} test {test_perplexity}')
task.produce_results(k, model)