+######################################################################
+
def int_to_suffix(n):
    """Render *n* compactly, using 'M'/'K' suffixes for exact multiples.

    Larger units are tried first, so 2_000_000 -> '2M' (not '2000K');
    non-multiples fall through to the plain decimal string.
    """
    for divisor, suffix in ((1000000, 'M'), (1000, 'K')):
        if n >= divisor and n % divisor == 0:
            return str(n // divisor) + suffix
    return str(n)
+
class vignette_logger():
    """Rate-limited progress logger with an ETA estimate.

    Calling the instance with (n, m) = (total, done) emits a progress line
    via the module-level `log_string`, but at most once every `delay_min`
    seconds of wall-clock time.
    """

    def __init__(self, delay_min = 60):
        # start_t anchors the throughput estimate; last_t throttles output.
        self.start_t = time.time()
        self.last_t = self.start_t
        self.delay_min = delay_min

    def __call__(self, n, m):
        now = time.time()
        # Throttle: stay silent until delay_min seconds have elapsed
        # since the last emitted line.
        if now <= self.last_t + self.delay_min:
            return
        # Average wall-clock seconds per processed sample so far.
        # NOTE(review): raises ZeroDivisionError if called with m == 0 —
        # presumably callers only report after at least one sample; confirm.
        per_sample = (now - self.start_t) / m
        log_string('sample_generation {:d} / {:d}'.format(
            m,
            n), ' [ETA ' + time.ctime(time.time() + per_sample * (n - m)) + ']'
        )
        self.last_t = now
+
def save_examplar_vignettes(data_set, nb, name):
    """Save `nb` randomly chosen vignettes from `data_set` as one image at `name`.

    Each vignette is min/max-normalized to [0, 1] before being copied into a
    (nb, 1, H, W) patchwork tensor that torchvision tiles into a single image.
    """
    # Global sample indices, drawn without replacement.
    chosen = torch.randperm(data_set.nb_samples).narrow(0, 0, nb)

    for k in range(nb):
        batch_index = chosen[k] // data_set.batch_size
        offset = chosen[k] % data_set.batch_size
        images, _ = data_set.get_batch(batch_index)
        vignette = images[offset].float()
        # Normalize to [0, 1] for display.
        vignette.sub_(vignette.min())
        vignette.div_(vignette.max())
        if k == 0:
            # Lazily sized from the first vignette — assumes all vignettes in
            # the set share one size; TODO confirm against the data pipeline.
            patchwork = Tensor(nb, 1, vignette.size(1), vignette.size(2))
        patchwork[k].copy_(vignette)

    torchvision.utils.save_image(patchwork, name)
+
+######################################################################
+
# The data pipeline assumes complete batches, so both sample counts must
# split evenly into full batches.
if args.nb_train_samples%args.batch_size > 0 or args.nb_test_samples%args.batch_size > 0:
    print('The number of samples must be a multiple of the batch size.')
    # Bug fix: a bare `raise` with no active exception fails with an
    # unrelated "RuntimeError: No active exception to re-raise"; raise an
    # explicit, descriptive error instead.
    raise ValueError('The number of samples must be a multiple of the batch size.')
+
log_string('############### start ###############')

# Select the vignette-set implementation (compressed vs raw storage) from
# the command line, and log which one is in effect.
if args.compress_vignettes:
    VignetteSet = svrtset.CompressedVignetteSet
    log_string('using_compressed_vignettes')
else:
    VignetteSet = svrtset.VignetteSet
    log_string('using_uncompressed_vignettes')
+
+for problem_number in map(int, args.problems.split(',')):
+
+ log_string('############### problem ' + str(problem_number) + ' ###############')
+
+ if args.deep_model:
+ model = AfrozeDeepNet()