X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?p=pysvrt.git;a=blobdiff_plain;f=cnn-svrt.py;h=5dc91c82e66e99322ee77ec95e6e8c4b337dcdff;hp=ab1b363a2b9dac04b28d02f975197008fe71fad6;hb=15f2d2cf0a655234cfa435789e26238b95f5a371;hpb=7a46506f936bad2e136424b68cbd92890d46830c diff --git a/cnn-svrt.py b/cnn-svrt.py index ab1b363..5dc91c8 100755 --- a/cnn-svrt.py +++ b/cnn-svrt.py @@ -23,9 +23,12 @@ import time import argparse +import math from colorama import Fore, Back, Style +# Pytorch + import torch from torch import optim @@ -35,59 +38,71 @@ from torch import nn from torch.nn import functional as fn from torchvision import datasets, transforms, utils -import svrt +# SVRT + +from vignette_set import VignetteSet, CompressedVignetteSet ###################################################################### parser = argparse.ArgumentParser( - description = 'Simple convnet test on the SVRT.', + description = "Convolutional networks for the SVRT. Written by Francois Fleuret, (C) Idiap research institute.", formatter_class = argparse.ArgumentDefaultsHelpFormatter ) -parser.add_argument('--nb_train_batches', - type = int, default = 1000, - help = 'How many samples for train') +parser.add_argument('--nb_train_samples', + type = int, default = 100000) -parser.add_argument('--nb_test_batches', - type = int, default = 100, - help = 'How many samples for test') +parser.add_argument('--nb_test_samples', + type = int, default = 10000) parser.add_argument('--nb_epochs', - type = int, default = 50, - help = 'How many training epochs') + type = int, default = 50) parser.add_argument('--batch_size', - type = int, default = 100, - help = 'Mini-batch size') + type = int, default = 100) parser.add_argument('--log_file', - type = str, default = 'cnn-svrt.log', - help = 'Log file name') + type = str, default = 'default.log') + +parser.add_argument('--compress_vignettes', + action='store_true', default = True, + help = 'Use lossless compression to reduce the memory footprint') + +parser.add_argument('--deep_model', + action='store_true', default = True, + help = 'Use Afroze\'s Alexnet-like deep model') + +parser.add_argument('--test_loaded_models', + action='store_true', default = False, + help = 'Should we compute the test errors of loaded models') args = parser.parse_args() ###################################################################### log_file = open(args.log_file, 'w') +pred_log_t = None print(Fore.RED + 'Logging into ' + args.log_file + Style.RESET_ALL) -def log_string(s): - s = Fore.GREEN + time.ctime() + Style.RESET_ALL + ' ' + s - log_file.write(s + '\n') - log_file.flush() - print(s) - -###################################################################### +# Log and prints the string, with a time stamp. 
Does not log the
+# remark
+def log_string(s, remark = ''):
+    global pred_log_t

-def generate_set(p, n):
-    target = torch.LongTensor(n).bernoulli_(0.5)
     t = time.time()
-    input = svrt.generate_vignettes(p, target)
-    t = time.time() - t
-    log_string('data_set_generation {:.02f} sample/s'.format(n / t))
-    input = input.view(input.size(0), 1, input.size(1), input.size(2)).float()
-    return Variable(input), Variable(target)
+
+    if pred_log_t is None:
+        elapsed = 'start'
+    else:
+        elapsed = '+{:.02f}s'.format(t - pred_log_t)
+
+    pred_log_t = t
+
+    log_file.write('[' + time.ctime() + '] ' + elapsed + ' ' + s + '\n')
+    log_file.flush()
+
+    print(Fore.BLUE + '[' + time.ctime() + '] ' + Fore.GREEN + elapsed + Style.RESET_ALL + ' ' + s + Fore.CYAN + remark + Style.RESET_ALL)

 ######################################################################

@@ -113,6 +128,7 @@ class AfrozeShallowNet(nn.Module):
         self.conv3 = nn.Conv2d(16, 120, kernel_size=18)
         self.fc1 = nn.Linear(120, 84)
         self.fc2 = nn.Linear(84, 2)
+        self.name = 'shallownet'

     def forward(self, x):
         x = fn.relu(fn.max_pool2d(self.conv1(x), kernel_size=2))
@@ -123,8 +139,74 @@ class AfrozeShallowNet(nn.Module):
         x = self.fc2(x)
         return x

-def train_model(model, train_input, train_target):
-    bs = args.batch_size
+######################################################################
+
+# Afroze's DeepNet
+
+#                                      map size   nb. maps
+#    input                              128x128        1
+# -- conv(7x7 x 32, stride=4, pad=3) ->   32x32       32
+# -- max(2x2)                        ->   16x16       32
+# -- conv(5x5 x 96, pad=2)           ->   16x16       96
+# -- max(2x2)                        ->     8x8       96
+# -- conv(3x3 x 128, pad=1)          ->     8x8      128
+# -- conv(3x3 x 128, pad=1)          ->     8x8      128
+# -- conv(3x3 x 96, pad=1)           ->     8x8       96
+# -- max(2x2)                        ->     4x4       96
+
+# -- reshape                         ->    1536        1
+# -- full(1536x256)                  ->     256        1
+# -- full(256x256)                   ->     256        1
+# -- full(256x2)                     ->       2        1
+
+class AfrozeDeepNet(nn.Module):
+    def __init__(self):
+        super(AfrozeDeepNet, self).__init__()
+        self.conv1 = nn.Conv2d(  1,  32, kernel_size=7, stride=4, padding=3)
+        self.conv2 = nn.Conv2d( 32,  96, kernel_size=5, padding=2)
+        self.conv3 = nn.Conv2d( 96, 128, kernel_size=3, padding=1)
+        self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
+        self.conv5 = nn.Conv2d(128,  96, kernel_size=3, padding=1)
+        self.fc1 = nn.Linear(1536, 256)
+        self.fc2 = nn.Linear(256, 256)
+        self.fc3 = nn.Linear(256, 2)
+        self.name = 'deepnet'
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = fn.max_pool2d(x, kernel_size=2)
+        x = fn.relu(x)
+
+        x = self.conv2(x)
+        x = fn.max_pool2d(x, kernel_size=2)
+        x = fn.relu(x)
+
+        x = self.conv3(x)
+        x = fn.relu(x)
+
+        x = self.conv4(x)
+        x = fn.relu(x)
+
+        x = self.conv5(x)
+        x = fn.max_pool2d(x, kernel_size=2)
+        x = fn.relu(x)
+
+        x = x.view(-1, 1536)
+
+        x = self.fc1(x)
+        x = fn.relu(x)
+
+        x = self.fc2(x)
+        x = fn.relu(x)
+
+        x = self.fc3(x)
+
+        return x
+
+######################################################################
+
+def train_model(model, train_set):
+    batch_size = args.batch_size

     criterion = nn.CrossEntropyLoss()

     if torch.cuda.is_available():
@@ -132,31 +214,35 @@ def train_model(model, train_input, train_target):

     optimizer = optim.SGD(model.parameters(), lr = 1e-2)

-    for k in range(0, args.nb_epochs):
+    start_t = time.time()
+
+    for e in range(0, args.nb_epochs):
         acc_loss = 0.0
-        for b in range(0, train_input.size(0), bs):
-            output = model.forward(train_input.narrow(0, b, bs))
-            loss = criterion(output, train_target.narrow(0, b, bs))
+        for b in range(0, train_set.nb_batches):
+            input, target = train_set.get_batch(b)
+            output = model.forward(Variable(input))
+            loss = criterion(output, Variable(target))
             acc_loss = 
acc_loss + loss.data[0] model.zero_grad() loss.backward() optimizer.step() - log_string('train_loss {:d} {:f}'.format(k, acc_loss)) + dt = (time.time() - start_t) / (e + 1) + log_string('train_loss {:d} {:f}'.format(e + 1, acc_loss), + ' [ETA ' + time.ctime(time.time() + dt * (args.nb_epochs - e)) + ']') return model ###################################################################### -def nb_errors(model, data_input, data_target): - bs = args.batch_size - +def nb_errors(model, data_set): ne = 0 - for b in range(0, data_input.size(0), bs): - output = model.forward(data_input.narrow(0, b, bs)) + for b in range(0, data_set.nb_batches): + input, target = data_set.get_batch(b) + output = model.forward(Variable(input)) wta_prediction = output.data.max(1)[1].view(-1) - for i in range(0, bs): - if wta_prediction[i] != data_target.narrow(0, b, bs).data[i]: + for i in range(0, data_set.batch_size): + if wta_prediction[i] != target[i]: ne = ne + 1 return ne @@ -166,54 +252,105 @@ def nb_errors(model, data_input, data_target): for arg in vars(args): log_string('argument ' + str(arg) + ' ' + str(getattr(args, arg))) +###################################################################### + +def int_to_suffix(n): + if n > 1000000 and n%1000000 == 0: + return str(n//1000000) + 'M' + elif n > 1000 and n%1000 == 0: + return str(n//1000) + 'K' + else: + return str(n) + +###################################################################### + +if args.nb_train_samples%args.batch_size > 0 or args.nb_test_samples%args.batch_size > 0: + print('The number of samples must be a multiple of the batch size.') + raise + for problem_number in range(1, 24): - train_input, train_target = generate_set(problem_number, - args.nb_train_batches * args.batch_size) - test_input, test_target = generate_set(problem_number, - args.nb_test_batches * args.batch_size) - model = AfrozeShallowNet() + + log_string('**** problem ' + str(problem_number) + ' ****') + + if args.deep_model: + model = AfrozeDeepNet() + else: + model = AfrozeShallowNet() if torch.cuda.is_available(): - train_input, train_target = train_input.cuda(), train_target.cuda() - test_input, test_target = test_input.cuda(), test_target.cuda() model.cuda() - mu, std = train_input.data.mean(), train_input.data.std() - train_input.data.sub_(mu).div_(std) - test_input.data.sub_(mu).div_(std) + model_filename = model.name + '_' + \ + str(problem_number) + '_' + \ + int_to_suffix(args.nb_train_samples) + '.param' nb_parameters = 0 - for p in model.parameters(): - nb_parameters += p.numel() + for p in model.parameters(): nb_parameters += p.numel() log_string('nb_parameters {:d}'.format(nb_parameters)) - model_filename = 'model_' + str(problem_number) + '.param' - + need_to_train = False try: model.load_state_dict(torch.load(model_filename)) log_string('loaded_model ' + model_filename) except: - log_string('training_model') - train_model(model, train_input, train_target) + need_to_train = True + + if need_to_train: + + log_string('training_model ' + model_filename) + + t = time.time() + + if args.compress_vignettes: + train_set = CompressedVignetteSet(problem_number, + args.nb_train_samples, args.batch_size, + cuda = torch.cuda.is_available()) + else: + train_set = VignetteSet(problem_number, + args.nb_train_samples, args.batch_size, + cuda = torch.cuda.is_available()) + + log_string('data_generation {:0.2f} samples / s'.format( + train_set.nb_samples / (time.time() - t)) + ) + + train_model(model, train_set) torch.save(model.state_dict(), model_filename) 
log_string('saved_model ' + model_filename) - nb_train_errors = nb_errors(model, train_input, train_target) + nb_train_errors = nb_errors(model, train_set) + + log_string('train_error {:d} {:.02f}% {:d} {:d}'.format( + problem_number, + 100 * nb_train_errors / train_set.nb_samples, + nb_train_errors, + train_set.nb_samples) + ) + + if need_to_train or args.test_loaded_models: + + t = time.time() + + if args.compress_vignettes: + test_set = CompressedVignetteSet(problem_number, + args.nb_test_samples, args.batch_size, + cuda = torch.cuda.is_available()) + else: + test_set = VignetteSet(problem_number, + args.nb_test_samples, args.batch_size, + cuda = torch.cuda.is_available()) - log_string('train_error {:d} {:.02f}% {:d} {:d}'.format( - problem_number, - 100 * nb_train_errors / train_input.size(0), - nb_train_errors, - train_input.size(0)) - ) + log_string('data_generation {:0.2f} samples / s'.format( + test_set.nb_samples / (time.time() - t)) + ) - nb_test_errors = nb_errors(model, test_input, test_target) + nb_test_errors = nb_errors(model, test_set) - log_string('test_error {:d} {:.02f}% {:d} {:d}'.format( - problem_number, - 100 * nb_test_errors / test_input.size(0), - nb_test_errors, - test_input.size(0)) - ) + log_string('test_error {:d} {:.02f}% {:d} {:d}'.format( + problem_number, + 100 * nb_test_errors / test_set.nb_samples, + nb_test_errors, + test_set.nb_samples) + ) ######################################################################
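The training and evaluation loops in this patch only rely on a small data-set interface: an object exposing nb_batches, nb_samples, batch_size and get_batch(b), the latter returning an (input, target) tensor pair. As a rough sketch of how such an object can be built directly on svrt.generate_vignettes() (the call used by the removed generate_set() helper), the class below may help. The name SimpleVignetteSet is hypothetical; the actual VignetteSet and CompressedVignetteSet classes imported from vignette_set may differ, in particular in how they compress vignettes and handle the cuda flag.

# Minimal, hypothetical stand-in for the data-set interface assumed by
# train_model() and nb_errors() above. Not the real vignette_set module.

import torch
import svrt

class SimpleVignetteSet(object):
    def __init__(self, problem_number, nb_samples, batch_size, cuda = False):
        assert nb_samples % batch_size == 0
        self.batch_size = batch_size
        self.nb_samples = nb_samples
        self.nb_batches = nb_samples // batch_size
        # Balanced random labels, as in the removed generate_set() helper
        self.targets = torch.LongTensor(nb_samples).bernoulli_(0.5)
        x = svrt.generate_vignettes(problem_number, self.targets)
        x = x.view(x.size(0), 1, x.size(1), x.size(2)).float()
        # Normalize to zero mean and unit variance, as the pre-patch code did
        mu, std = x.mean(), x.std()
        self.inputs = x.sub_(mu).div_(std)
        if cuda:
            self.inputs, self.targets = self.inputs.cuda(), self.targets.cuda()

    def get_batch(self, b):
        first = b * self.batch_size
        return self.inputs.narrow(0, first, self.batch_size), \
               self.targets.narrow(0, first, self.batch_size)

Passed to train_model() or nb_errors(), such an object drives the same batch loop as the classes used above; according to the --compress_vignettes help text, CompressedVignetteSet additionally applies lossless compression to reduce the memory footprint.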