X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=76aeebda3d75de43de36821eeccfde10de31ef21;hb=7dbeac21672006bcb2d4e316d6d83c40c87d3751;hp=4a332b82ab26318d210b479dcbebb5bc25ec6b38;hpb=1e7c259b1dd038a0f45dba96e872cd1121f38f96;p=mygpt.git

diff --git a/main.py b/main.py
index 4a332b8..76aeebd 100755
--- a/main.py
+++ b/main.py
@@ -24,9 +24,6 @@ parser = argparse.ArgumentParser(description = 'My own GPT.')
 parser.add_argument('--log_filename',
                     type = str, default = 'train.log')
 
-parser.add_argument('--download',
-                    action='store_true', default = False)
-
 parser.add_argument('--seed',
                     type = int, default = 0)
 
@@ -78,8 +75,8 @@ parser.add_argument('--checkpoint_name',
 ##############################
 # picoclvr options
 
-parser.add_argument('--picoclvr_many_colors',
-                    action='store_true', default = False)
+parser.add_argument('--picoclvr_nb_colors',
+                    type = int, default = 5)
 
 parser.add_argument('--picoclvr_height',
                     type = int, default = 12)
@@ -113,22 +110,34 @@ for n in vars(args):
 
 ######################################################################
 
-def produce_results(
-        self,
-        model, nb_samples, nb_tokens_to_generate, starting_input = None,
-        device = 'cpu'
+def autoregression(
+        model, batch_size,
+        nb_samples, nb_tokens_to_generate, primer = None,
+        device = torch.device('cpu')
 ):
-    results = torch.zeros(nb_samples, nb_tokens_to_generate, dtype = torch.int64, device = device)
-    for input in results.split(self.batch_size):
-        for s in tqdm.tqdm(range(input.size(1) - 1), desc = 'synth'):
+    results = torch.zeros(
+        nb_samples, nb_tokens_to_generate,
+        dtype = torch.int64, device = device
+    )
+
+    if primer is None:
+        first = 0
+    else:
+        first = primer.size(1)
+        results = torch.cat((primer, results), 1)
+
+    for input in results.split(batch_size):
+        for s in tqdm.tqdm(range(first, input.size(1)), desc = 'synth'):
             output = model(input)
             logits = output[:, s]
             if args.synthesis_sampling:
                 dist = torch.distributions.categorical.Categorical(logits = logits)
-                t = dist.sample()
+                t_next = dist.sample()
             else:
-                t = logits.argmax(1)
-            input[:, s + 1] = t
+                t_next = logits.argmax(1)
+            input[:, s] = t_next
+
+    return results
 
 ######################################################################
 
@@ -148,19 +157,24 @@ import picoclvr
 
 class TaskPicoCLVR(Task):
 
+    def descr2tensor(self, descr):
+        t = [ [ self.token2id[u] for u in s ] for s in descr ]
+        return torch.tensor(t, device = self.device)
+
     def __init__(self, batch_size,
-                 height, width, many_colors = False,
+                 height, width, nb_colors = 5,
                  device = torch.device('cpu')):
 
         def generate_descr(nb):
             descr = picoclvr.generate(
                 nb,
                 height = self.height, width = self.width,
-                many_colors = many_colors
+                nb_colors = nb_colors
             )
 
             descr = [ s.strip().split(' ') for s in descr ]
             l = max([ len(s) for s in descr ])
+            #descr = [ [ '<nul>' ] * (l - len(s)) + s for s in descr ]
             descr = [ s + [ '<nul>' ] * (l - len(s)) for s in descr ]
 
             return descr
@@ -182,10 +196,9 @@ class TaskPicoCLVR(Task):
         self.token2id = dict([ (t, n) for n, t in enumerate(tokens) ])
         self.id2token = dict([ (n, t) for n, t in enumerate(tokens) ])
 
-        t = [ [ self.token2id[u] for u in s ] for s in self.train_descr ]
-        self.train_input = torch.tensor(t, device = self.device)
-        t = [ [ self.token2id[u] for u in s ] for s in self.test_descr ]
-        self.test_input = torch.tensor(t, device = self.device)
+        # Tokenize the train and test sets
+        self.train_input = self.descr2tensor(self.train_descr)
+        self.test_input = self.descr2tensor(self.test_descr)
 
     def batches(self, split = 'train'):
         assert split in { 'train', 'test' }
@@ -199,31 +212,21 @@ class TaskPicoCLVR(Task):
     def vocabulary_size(self):
         return len(self.token2id)
 
-    def generate(self, primer, model, nb_tokens):
-        t_primer = primer.strip().split(' ')
-        t_generated = [ ]
-
-        for j in range(nb_tokens):
-            t = [ [ self.token2id[u] for u in t_primer + t_generated ] ]
-            input = torch.tensor(t, device = self.device)
-            output = model(input)
-            logits = output[0, -1]
-            if args.synthesis_sampling:
-                dist = torch.distributions.categorical.Categorical(logits = logits)
-                t = dist.sample()
-            else:
-                t = logits.argmax()
-            t_generated.append(self.id2token[t.item()])
-
-        return ' '.join(t_primer + t_generated)
+    def generate(self, descr_primer, model, nb_tokens):
+        results = autoregression(
+            model, self.batch_size,
+            1, nb_tokens, primer = self.descr2tensor([ descr_primer.strip().split(' ') ]),
+            device = self.device
+        )
+        return ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
 
     def produce_results(self, n_epoch, model, nb_tokens = None):
         if nb_tokens is None:
             nb_tokens = self.height * self.width + 3
-        descr = [ ]
+        result_descr = [ ]
        nb_per_primer = 8
-        for primer in [
+        for descr_primer in [
            'red above green <sep> green top <sep> blue right of red <img>',
            'there is red <sep> there is yellow <sep> there is blue <img>',
            'red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left <img>',
        ]:
@@ -231,9 +234,10 @@ class TaskPicoCLVR(Task):
 
             for k in range(nb_per_primer):
-                descr.append(self.generate(primer, model, nb_tokens))
+                result_descr.append(self.generate(descr_primer, model, nb_tokens))
 
-        img = [ picoclvr.descr2img(d, height = self.height, width = self.width) for d in descr ]
+        img = [ picoclvr.descr2img(d, height = self.height, width = self.width)
+                for d in result_descr ]
         img = torch.cat(img, 0)
         image_name = f'result_picoclvr_{n_epoch:04d}.png'
         torchvision.utils.save_image(
             img / 255.,
@@ -242,14 +246,14 @@
         )
         log_string(f'wrote {image_name}')
 
-        nb_missing = sum( [
-            x[2] for x in picoclvr.nb_missing_properties(
-                descr,
-                height = self.height, width = self.width
-            )
-        ] )
+        np = picoclvr.nb_properties(
+            result_descr,
+            height = self.height, width = self.width
+        )
 
-        log_string(f'nb_missing {nb_missing / len(descr):.02f}')
+        nb_requested_properties, _, nb_missing_properties = zip(*np)
+
+        log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
 
 ######################################################################
 
@@ -333,14 +337,15 @@ class TaskWiki103(Task):
 
         for j in range(nb_tokens):
             input = self.tensorize([ t_primer + t_generated ]).to(self.device)
+            input = F.pad(input, (0, 1)) # Add the next token, the one to predict
             output = model(input)
             logits = output[0, -1]
             if args.synthesis_sampling:
                 dist = torch.distributions.categorical.Categorical(logits = logits)
-                t = dist.sample()
+                t_next = dist.sample()
             else:
-                t = logits.argmax()
-            t_generated.append(self.vocab.lookup_token(t))
+                t_next = logits.argmax()
+            t_generated.append(self.vocab.lookup_token(t_next))
             if t_generated[-1] == '<non>': break
 
         s = ' '.join(t_generated)
@@ -373,18 +378,7 @@ class TaskMNIST(Task):
         return 256
 
     def produce_results(self, n_epoch, model, nb_samples = 64):
-        results = torch.zeros(nb_samples, 28 * 28, dtype = torch.int64, device = self.device)
-        for input in results.split(self.batch_size):
-            for s in tqdm.tqdm(range(input.size(1)), desc = 'synth'):
-                output = model(input)
-                logits = output[:, s]
-                if args.synthesis_sampling:
-                    dist = torch.distributions.categorical.Categorical(logits = logits)
-                    t = dist.sample()
-                else:
-                    t = logits.argmax(1)
-                input[:, s] = t
-
+        results = autoregression(model, self.batch_size, nb_samples, 28 * 28, device = self.device)
         image_name = f'result_mnist_{n_epoch:04d}.png'
         torchvision.utils.save_image(1 - results.reshape(-1, 1, 28, 28) / 255.,
                                      image_name, nrow = 16, pad_value = 0.8)
@@ -405,7 +399,7 @@ elif args.data == 'picoclvr':
     task = TaskPicoCLVR(batch_size = args.batch_size,
                         height = args.picoclvr_height,
                         width = args.picoclvr_width,
-                        many_colors = args.picoclvr_many_colors,
+                        nb_colors = args.picoclvr_nb_colors,
                         device = device)
 else:
     raise ValueError(f'Unknown dataset {args.data}.')
@@ -443,7 +437,7 @@
 nb_epochs_finished = 0
 
 if args.no_checkpoint:
-    log_string(f'Not trying to load checkpoint.')
+    log_string(f'not trying to load checkpoint.')
 
 else:
     try:
@@ -451,13 +445,13 @@
         nb_epochs_finished = checkpoint['nb_epochs_finished']
         model.load_state_dict(checkpoint['model_state'])
         optimizer.load_state_dict(checkpoint['optimizer_state'])
-        log_string(f'Checkpoint loaded with {nb_epochs_finished} epochs finished.')
+        log_string(f'checkpoint loaded with {nb_epochs_finished} epochs finished.')
 
     except FileNotFoundError:
-        log_string('Starting from scratch.')
+        log_string('starting from scratch.')
 
     except:
-        log_string('Error when loading the checkpoint.')
+        log_string('error when loading the checkpoint.')
         exit(1)
 
 ######################################################################
@@ -468,9 +462,9 @@ token_count = 0
 for input in task.batches(split = 'train'):
     token_count += F.one_hot(input, num_classes = task.vocabulary_size()).sum((0, 1))
 token_probas = token_count / token_count.sum()
-h = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(h)
-log_string(f'Train set perplexity {train_set_perplexity}')
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
+#log_string(f'train set perplexity {train_set_perplexity}')
 
 for k in range(nb_epochs_finished, nb_epochs):
 
@@ -505,7 +499,7 @@
     train_perplexity = math.exp(min(100, acc_train_loss/nb_train_samples))
     test_perplexity = math.exp(min(100, acc_test_loss/nb_test_samples))
 
-    log_string(f'perplexity {k} train {train_perplexity} test {test_perplexity}')
+    log_string(f'perplexity {k} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}')
 
     task.produce_results(k, model)
 
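######################################################################

The central refactor in this patch replaces three near-identical decoding loops (the old free produce_results() function, TaskPicoCLVR.generate, and TaskMNIST.produce_results) with the single autoregression() helper: tokens are generated left to right, each position either sampled from the softmax of the model's logits or taken as the argmax, and an optional primer shifts the starting position so the prompt tokens are kept verbatim. Below is a minimal standalone sketch of that scheme, not part of the patch; ToyModel is a hypothetical stand-in, whereas the real mygpt model is causal and shifts its input one position internally, so that output[:, s] is the prediction for position s given the tokens before it.

import torch

class ToyModel(torch.nn.Module):
    # Hypothetical stand-in model: returns one row of logits per position.
    # No causal masking here; this only exercises the decoding loop.
    def __init__(self, vocab_size = 16, dim = 32):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, dim)
        self.readout = torch.nn.Linear(dim, vocab_size)

    def forward(self, x):
        return self.readout(self.embedding(x))  # (N, T, vocab_size)

model = ToyModel()
results = torch.zeros(4, 10, dtype = torch.int64)  # nb_samples x nb_tokens

for s in range(results.size(1)):
    logits = model(results)[:, s]
    # Stochastic synthesis, as when args.synthesis_sampling is set...
    dist = torch.distributions.categorical.Categorical(logits = logits)
    results[:, s] = dist.sample()
    # ...or greedy synthesis: results[:, s] = logits.argmax(1)

print(results)

With a primer of length p, autoregression() concatenates it in front of the zero tensor and starts the loop at s = p, which is why the loop in the patch now writes input[:, s] instead of the previous input[:, s + 1].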