X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=305bd3cfb75a351d1b8513dcf9ce136c7a55844f;hb=2ac9d1299a84f96228f49fbdac02d5a7017445e5;hp=df3f15453c7002c8799a6ce94f3220e88e961fc3;hpb=046c2b8633a415e533ec14cb72d77845f0c3e85f;p=picoclvr.git

diff --git a/main.py b/main.py
index df3f154..305bd3c 100755
--- a/main.py
+++ b/main.py
@@ -35,7 +35,7 @@ parser.add_argument(
     "--task",
     type=str,
     default="picoclvr",
-    help="picoclvr, mnist, maze, snake, stack, expr",
+    help="picoclvr, mnist, maze, snake, stack, expr, world",
 )
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
@@ -110,7 +110,7 @@ parser.add_argument("--snake_nb_colors", type=int, default=5)
 parser.add_argument("--snake_length", type=int, default=200)
 
 ##############################
-# Snake options
+# Stack options
 
 parser.add_argument("--stack_nb_steps", type=int, default=100)
 
@@ -125,7 +125,18 @@ parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.7
 
 parser.add_argument("--expr_nb_variables", type=int, default=5)
 
-parser.add_argument("--expr_sequence_length", type=int, default=30)
+parser.add_argument("--expr_sequence_length", type=int, default=40)
+
+parser.add_argument("--expr_operand_max", type=int, default=9)
+
+parser.add_argument("--expr_result_max", type=int, default=99)
+
+parser.add_argument("--expr_input_file", type=str, default=None)
+
+##############################
+# World options
+
+parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
 
 ######################################################################
 
@@ -170,11 +181,17 @@ default_args = {
         "nb_test_samples": 1000,
     },
     "expr": {
-        "nb_epochs": 50,
+        "nb_epochs": 40,
         "batch_size": 25,
-        "nb_train_samples": 250000,
+        "nb_train_samples": 1000000,
         "nb_test_samples": 10000,
     },
+    "world": {
+        "nb_epochs": 10,
+        "batch_size": 25,
+        "nb_train_samples": 125000,
+        "nb_test_samples": 1000,
+    },
 }
 
 if args.task in default_args:
@@ -305,7 +322,19 @@ elif args.task == "expr":
         nb_test_samples=args.nb_test_samples,
         nb_variables=args.expr_nb_variables,
         sequence_length=args.expr_sequence_length,
+        operand_max=args.expr_operand_max,
+        result_max=args.expr_result_max,
+        batch_size=args.batch_size,
+        device=device,
+    )
+
+elif args.task == "world":
+    task = tasks.World(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
         batch_size=args.batch_size,
+        vqae_nb_epochs=args.world_vqae_nb_epochs,
+        logger=log_string,
         device=device,
     )
 
@@ -366,6 +395,20 @@ else:
 
 ######################################################################
 
+if args.task == "expr" and args.expr_input_file is not None:
+    task.produce_results(
+        nb_epochs_finished,
+        model,
+        args.result_dir,
+        log_string,
+        args.deterministic_synthesis,
+        args.expr_input_file,
+    )
+
+    exit(0)
+
+######################################################################
+
 nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
 
 # Compute the entropy of the training tokens
@@ -383,20 +426,28 @@ train_set_perplexity = math.exp(entropy)
 
 train_examples = {}
 
+
 for input in task.batches(split="train"):
     assert input.dim() == 2 and input.dtype == torch.int64
     for x in input:
         train_examples[x.sum().item()] = x
 
+nb_total, nb_collisions = 0, 0
 for input in task.batches(split="test"):
     assert input.dim() == 2 and input.dtype == torch.int64
     for x in input:
+        nb_total += 1
         y = train_examples.get(x.sum().item())
         if y is not None:
-            assert x.size() != y.size() or (x - y).abs().sum() > 0
+            if x.size() == y.size() and (x - y).abs().sum() == 0:
+                nb_collisions += 1
 
 del train_examples
 
+log_string(
+    f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set"
+)
+
 ##############################
 
 if args.learning_rate_schedule == "cos":
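
For reference, a minimal standalone sketch of the train/test overlap check that the last hunk introduces: train sequences are bucketed by their token sum, which acts as a cheap hash, and an exact element-wise comparison then confirms a genuine duplicate. The function name, the batch iterables, and the usage at the end are illustrative assumptions, not part of the patch; note also that the dict keeps only the last train sequence per sum, so the count is a lower bound when distinct train sequences collide on the hash.

import torch

def test_in_train_fraction(train_batches, test_batches):
    # Bucket every train sequence by its token sum (a coarse hash); only the
    # last sequence per sum is kept, as in the patched main.py.
    train_examples = {}
    for input in train_batches:
        for x in input:
            train_examples[x.sum().item()] = x

    # For each test sequence, an exact comparison against the bucketed train
    # sequence with the same sum detects a verbatim duplicate.
    nb_total, nb_collisions = 0, 0
    for input in test_batches:
        for x in input:
            nb_total += 1
            y = train_examples.get(x.sum().item())
            if y is not None and x.size() == y.size() and (x - y).abs().sum() == 0:
                nb_collisions += 1

    return nb_collisions / nb_total

# Two identical batches: every "test" sample is found in "train", so 1.0
batch = torch.randint(10, (25, 40))
print(test_in_train_fraction([batch], [batch.clone()]))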