import argparse
import distutils.util
import math
import time

from colorama import Fore, Back, Style
type = str, default = 'default.log')
# Boolean options take an explicit value on the command line
# (e.g. --deep_model=False); strtobool accepts yes/no/true/false/1/0.
# String defaults ('True'/'False') are run through `type` by argparse.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — replace
# strtobool with a small local parser when upgrading; confirm target version.
parser.add_argument('--compress_vignettes',
                    type = distutils.util.strtobool, default = 'True',
                    help = 'Use lossless compression to reduce the memory footprint')

parser.add_argument('--deep_model',
                    type = distutils.util.strtobool, default = 'True',
                    help = 'Use Afroze\'s Alexnet-like deep model')

parser.add_argument('--test_loaded_models',
                    type = distutils.util.strtobool, default = 'False',
                    help = 'Should we compute the test errors of loaded models')

args = parser.parse_args()
######################################################################
def int_to_suffix(n):
    """Return a compact human-readable form of the integer n.

    Exact multiples of 10**6 become '<k>M', exact multiples of 10**3
    become '<k>K', and any other value is returned as plain decimal digits.
    """
    # >= (not >) so that exactly 1000 -> '1K' and exactly 1000000 -> '1M'.
    if n >= 1000000 and n % 1000000 == 0:
        return str(n // 1000000) + 'M'
    elif n >= 1000 and n % 1000 == 0:
        return str(n // 1000) + 'K'
    else:
        return str(n)
# NOTE(review): bare top-level `raise` — with no active exception this stops
# the script with a RuntimeError. Presumably a deliberate debug halt; remove
# it once the code below is meant to run.
raise

# Pick the vignette container class according to the command-line flag and
# log which variant is in use.
if args.compress_vignettes:
    log_string('using_compressed_vignettes')
    VignetteSet = vignette_set.CompressedVignetteSet
else:
    log_string('using_uncompressed_vignettes')
    VignetteSet = vignette_set.VignetteSet
for problem_number in range(1, 24):
if torch.cuda.is_available(): model.cuda()
- model_filename = model.name + '_' + \
- str(problem_number) + '_' + \
+ model_filename = model.name + '_pb:' + \
+ str(problem_number) + '_ns:' + \
int_to_suffix(args.nb_train_samples) + '.param'
nb_parameters = 0