X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=c1f4dc7f0540e2dcbbdf8c71b9a3c1ca29db457b;hb=f91736e6e56152746b3c44342748b70ad1c89888;hp=0323d0218ec587066817d5359044176cab99692b;hpb=76671c582f029aa67fce2626764b02e8d9e2dbeb;p=picoclvr.git

diff --git a/main.py b/main.py
index 0323d02..c1f4dc7 100755
--- a/main.py
+++ b/main.py
@@ -37,7 +37,7 @@ parser.add_argument(
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
 
-parser.add_argument("--result_dir", type=str, default="results_default")
+parser.add_argument("--result_dir", type=str, default=None)
 
 parser.add_argument("--seed", type=int, default=0)
 
@@ -45,9 +45,9 @@ parser.add_argument("--nb_epochs", type=int, default=None)
 
 parser.add_argument("--batch_size", type=int, default=None)
 
-parser.add_argument("--nb_train_samples", type=int, default=250000)
+parser.add_argument("--nb_train_samples", type=int, default=None)
 
-parser.add_argument("--nb_test_samples", type=int, default=10000)
+parser.add_argument("--nb_test_samples", type=int, default=None)
 
 parser.add_argument("--optim", type=str, default="adam")
 
@@ -109,11 +109,13 @@ parser.add_argument("--snake_length", type=int, default=200)
 ##############################
 # Snake options
 
-parser.add_argument("--stack_nb_steps", type=int, default=25)
+parser.add_argument("--stack_nb_steps", type=int, default=100)
 
 parser.add_argument("--stack_nb_stacks", type=int, default=1)
 
-parser.add_argument("--stack_nb_values", type=int, default=10)
+parser.add_argument("--stack_nb_digits", type=int, default=3)
+
+parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
 
 ######################################################################
 
@@ -121,22 +123,8 @@ args = parser.parse_args()
 
 assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
 
-try:
-    os.mkdir(args.result_dir)
-except FileExistsError:
-    if not args.overwrite_results:
-        print(f"result directory {args.result_dir} already exists")
-        exit(1)
-
-log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
-
-if args.seed >= 0:
-    # torch.backends.cudnn.deterministic = True
-    # torch.backends.cudnn.benchmark = False
-    # torch.use_deterministic_algorithms(True)
-    torch.manual_seed(args.seed)
-    if torch.cuda.is_available():
-        torch.cuda.manual_seed_all(args.seed)
+if args.result_dir is None:
+    args.result_dir = f"results_{args.task}"
 
 ######################################################################
 
@@ -166,9 +154,9 @@ default_args = {
         "nb_test_samples": 10000,
     },
     "stack": {
-        "nb_epochs": 25,
+        "nb_epochs": 5,
         "batch_size": 25,
-        "nb_train_samples": 10000,
+        "nb_train_samples": 100000,
         "nb_test_samples": 1000,
     },
 }
@@ -180,6 +168,25 @@ if args.task in default_args:
 
 ######################################################################
 
+try:
+    os.mkdir(args.result_dir)
+except FileExistsError:
+    if not args.overwrite_results:
+        print(f"result directory {args.result_dir} already exists")
+        exit(1)
+
+log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
+
+if args.seed >= 0:
+    # torch.backends.cudnn.deterministic = True
+    # torch.backends.cudnn.benchmark = False
+    # torch.use_deterministic_algorithms(True)
+    torch.manual_seed(args.seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(args.seed)
+
+######################################################################
+
 
 def log_string(s):
     t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
@@ -214,7 +221,7 @@ def masked_inplace_autoregression(
     # entropy[:,s]= p.xlogy(p).sum(1) / math.log(2)
     batches = zip(input.split(batch_size), ar_mask.split(batch_size))
     if progress_bar_desc is not None:
-        tqdm.tqdm(
+        batches = tqdm.tqdm(
             batches,
             dynamic_ncols=True,
             desc=progress_bar_desc,
@@ -702,14 +709,14 @@ class TaskMaze(Task):
             model, "train", nb_to_use=1000
         )
         log_string(
-            f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )
 
         test_nb_total, test_nb_correct, count = self.compute_error(
             model, "test", nb_to_use=1000
         )
         log_string(
-            f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )
 
         if count is not None:
@@ -855,7 +862,7 @@ class TaskSnake(Task):
         )
 
         log_string(
-            f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )
 
         model.train(t)
@@ -875,23 +882,48 @@ class TaskStack(Task):
         batch_size,
         nb_steps,
         nb_stacks,
-        nb_values,
+        nb_digits,
+        fraction_values_for_train=None,
         device=torch.device("cpu"),
     ):
         self.batch_size = batch_size
         self.nb_steps = nb_steps
         self.nb_stacks = nb_stacks
-        self.nb_values = nb_values
+        self.nb_digits = nb_digits
         self.device = device
 
+        if fraction_values_for_train is None:
+            values_for_train = None
+            values_for_test = None
+        else:
+            all = torch.randperm(10**nb_digits)
+            nb_for_train = int(all.size(0) * fraction_values_for_train)
+            values_for_train = all[:nb_for_train]
+            values_for_test = all[nb_for_train:]
+
         self.train_input, self.train_stack_counts = stack.generate_sequences(
-            nb_train_samples, nb_steps, nb_stacks, nb_values, self.device
+            nb_train_samples,
+            nb_steps,
+            nb_stacks,
+            nb_digits,
+            values_for_train,
+            self.device,
         )
 
         self.test_input, self.test_stack_counts = stack.generate_sequences(
-            nb_test_samples, nb_steps, nb_stacks, nb_values, self.device
+            nb_test_samples,
+            nb_steps,
+            nb_stacks,
+            nb_digits,
+            values_for_test,
+            self.device,
         )
 
+        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
+        counts = self.test_stack_counts.flatten()[i.flatten()]
+        counts = F.one_hot(counts).sum(0)
+        log_string(f"pop_stack_counts {counts}")
+
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -916,28 +948,47 @@ class TaskStack(Task):
         def compute_nb_correct(input):
             result = input.clone()
-            stack.remove_poped_values(result,self.nb_stacks)
+            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
             ar_mask = (result != input).long()
-            result *= 1 - ar_mask
-
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, device=self.device
             )
 
-            nb_total = ar_mask.sum()
+            errors = ((result != input).long() * ar_mask).reshape(
+                -1, 1 + self.nb_digits
+            )
+            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
 
-            nb_correct = (
-                (result == input).long() * ar_mask
-            ).sum()
+            nb_total = ar_mask.max(1).values.sum()
+            nb_correct = nb_total - errors.max(1).values.sum()
 
             return nb_total, nb_correct
 
         test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
 
         log_string(
-            f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )
 
+        ##############################################################
+        # Log a few generated sequences
+        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+        result = input.clone()
+        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+        ar_mask = (result != input).long()
+        for n in range(result.size(0)):
+            log_string(
+                f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+            )
+        masked_inplace_autoregression(
+            model, self.batch_size, result, ar_mask, device=self.device
+        )
+        for n in range(result.size(0)):
+            log_string(
+                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+            )
+        ##############################################################
+
         model.train(t)
 
@@ -1010,9 +1061,10 @@ elif args.task == "stack":
         nb_train_samples=args.nb_train_samples,
         nb_test_samples=args.nb_test_samples,
         batch_size=args.batch_size,
-        nb_steps = args.stack_nb_steps,
-        nb_stacks = args.stack_nb_stacks,
-        nb_values = args.stack_nb_values,
+        nb_steps=args.stack_nb_steps,
+        nb_stacks=args.stack_nb_stacks,
+        nb_digits=args.stack_nb_digits,
+        fraction_values_for_train=args.stack_fraction_values_for_train,
         device=device,
     )
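######################################################################

The main functional change above is the held-out-values mechanism for
the stack task: when --stack_fraction_values_for_train is set,
TaskStack.__init__ shuffles the 10**nb_digits possible values and
splits them, so test sequences manipulate values never pushed during
training. The reworked compute_nb_correct also changes the unit of
accuracy from tokens to whole values: a value spans 1 + nb_digits
tokens and counts as correct only if every one of its tokens is
predicted correctly, hence the reshape to (-1, 1 + self.nb_digits) and
the max over dimension 1. A minimal standalone sketch of both ideas
follows; the constants and the toy errors/ar_mask tensors are made up
for illustration, not taken from the repository:

    import torch

    nb_digits = 3                      # as the new --stack_nb_digits default
    fraction_values_for_train = 0.75   # hypothetical setting

    # Held-out split: shuffle all 10**nb_digits values, train on the
    # first fraction, test on the disjoint remainder.
    all_values = torch.randperm(10**nb_digits)
    nb_for_train = int(all_values.size(0) * fraction_values_for_train)
    values_for_train = all_values[:nb_for_train]
    values_for_test = all_values[nb_for_train:]
    assert not set(values_for_train.tolist()) & set(values_for_test.tolist())

    # Per-value accuracy: one row per value, 1 + nb_digits tokens each.
    # A value is wrong as soon as any of its tokens is wrong (max over
    # dim 1), and it enters the total as soon as any of its tokens was
    # masked for prediction.
    errors = torch.tensor([[0, 1, 0, 0], [0, 0, 0, 0]])   # toy token errors
    ar_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]])  # toy prediction mask
    nb_total = ar_mask.max(1).values.sum()                # -> 2 values scored
    nb_correct = nb_total - errors.max(1).values.sum()    # -> 1 value correct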